nexedi / linux
Commit 3e446c25, authored May 06, 2003 by Sridhar Samudrala
    Manual merge.

Parents: efeed5ae, 09593bb6
Showing 35 changed files with 2934 additions and 2015 deletions (+2934, -2015).
 include/net/sctp/command.h     +1    -1
 include/net/sctp/constants.h   +30   -26
 include/net/sctp/sctp.h        +90   -56
 include/net/sctp/sm.h          +65   -75
 include/net/sctp/structs.h     +174  -99
 include/net/sctp/tsnmap.h      +1    -1
 include/net/sctp/ulpevent.h    +22   -15
 include/net/sctp/ulpqueue.h    +4    -3
 include/net/sctp/user.h        +4    -0
 net/sctp/Kconfig               +37   -10
 net/sctp/Makefile              +2    -3
 net/sctp/adler32.c             +27   -18
 net/sctp/associola.c           +88   -59
 net/sctp/bind_addr.c           +42   -15
 net/sctp/chunk.c               +327  -0
 net/sctp/command.c             +2    -2
 net/sctp/crc32c.c              +17   -0
 net/sctp/endpointola.c         +46   -50
 net/sctp/input.c               +54   -55
 net/sctp/inqueue.c             +10   -10
 net/sctp/ipv6.c                +52   -21
 net/sctp/objcnt.c              +2    -0
 net/sctp/output.c              +83   -80
 net/sctp/outqueue.c            +296  -331
 net/sctp/primitive.c           +2    -2
 net/sctp/protocol.c            +42   -17
 net/sctp/sm_make_chunk.c       +265  -309
 net/sctp/sm_sideeffect.c       +105  -69
 net/sctp/sm_statefuns.c        +355  -339
 net/sctp/socket.c              +552  -260
 net/sctp/ssnmap.c              +2    -2
 net/sctp/transport.c           +32   -22
 net/sctp/tsnmap.c              +2    -3
 net/sctp/ulpevent.c            +48   -38
 net/sctp/ulpqueue.c            +53   -24
include/net/sctp/command.h

@@ -182,7 +182,7 @@ typedef struct {
 /* Create a new sctp_command_sequence.
  * Return NULL if creating a new sequence fails.
  */
-sctp_cmd_seq_t *sctp_new_cmd_seq(int priority);
+sctp_cmd_seq_t *sctp_new_cmd_seq(int gfp);

 /* Initialize a block of memory as a command sequence.
  * Return 0 if the initialization fails.
include/net/sctp/constants.h

@@ -6,46 +6,42 @@
  *
  * This file is part of the SCTP kernel reference Implementation
  *
- * This file is part of the implementation of the add-IP extension,
- * based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001,
- * for the SCTP kernel reference Implementation.
- *
- * The SCTP reference implementation  is free software;
+ * The SCTP reference implementation is free software;
  * you can redistribute it and/or modify it under the terms of
  * the GNU General Public License as published by
  * the Free Software Foundation; either version 2, or (at your option)
  * any later version.
  *
- * the SCTP reference implementation  is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * The SCTP reference implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
  * along with GNU CC; see the file COPYING.  If not, write to
  * the Free Software Foundation, 59 Temple Place - Suite 330,
  * Boston, MA 02111-1307, USA.
  *
- * Please send any bug reports or fixes you make to one of the following email
- * addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
  *    lksctp developers <lksctp-developers@lists.sourceforge.net>
  *
  * Or submit a bug report through the following website:
  *    http://www.sf.net/projects/lksctp
  *
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
- * Randall Stewart <randall@stewart.chicago.il.us>
- * Ken Morneau <kmorneau@cisco.com>
- * Qiaobing Xie <qxie1@motorola.com>
- * Xingang Guo <xingang.guo@intel.com>
- * Sridhar Samudrala <samudrala@us.ibm.com>
- * Daisy Chang <daisyc@us.ibm.com>
+ * Written or modified by:
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Karl Knutson          <karl@athena.chicago.il.us>
+ *    Randall Stewart       <randall@stewart.chicago.il.us>
+ *    Ken Morneau           <kmorneau@cisco.com>
+ *    Qiaobing Xie          <qxie1@motorola.com>
+ *    Xingang Guo           <xingang.guo@intel.com>
+ *    Sridhar Samudrala     <samudrala@us.ibm.com>
+ *    Daisy Chang           <daisyc@us.ibm.com>
  *
  * Any bugs reported given to us we will try to fix... any fixes shared will
  * be incorporated into the next SCTP release.
  *
- * There are still LOTS of bugs in this code... I always run on the motto
- * "it is a wonder any code ever works :)"
- *
  */

 #ifndef __sctp_constants_h__

@@ -220,7 +216,7 @@ typedef enum {
  * - A socket in SCTP_SS_LISTENING state indicates that it is willing to
  *   accept new associations, but cannot initiate the creation of new ones.
  * - A socket in SCTP_SS_ESTABLISHED state indicates that it has a single
- *   association in ESTABLISHED state.
+ *   association.
  */
 typedef enum {
     SCTP_SS_CLOSED         = TCP_CLOSE,

@@ -336,10 +332,18 @@ typedef enum {
 #define SCTP_SIGNATURE_SIZE 20      /* size of a SLA-1 signature */

-#define SCTP_COOKIE_MULTIPLE    64  /* Pad out our cookie to make our hash
+#define SCTP_COOKIE_MULTIPLE    32  /* Pad out our cookie to make our hash
                                      * functions simpler to write.
                                      */

+#if defined (CONFIG_SCTP_HMAC_MD5)
+#define SCTP_COOKIE_HMAC_ALG "md5"
+#elif defined (CONFIG_SCTP_HMAC_SHA1)
+#define SCTP_COOKIE_HMAC_ALG "sha1"
+#else
+#define SCTP_COOKIE_HMAC_ALG NULL
+#endif
+
 /* These return values describe the success or failure of a number of
  * routines which form the lower interface to SCTP_outqueue.
  */
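The new SCTP_COOKIE_HMAC_ALG constant carries the Kconfig-selected HMAC name ("md5", "sha1", or NULL) down to the code that signs state cookies. A minimal sketch of how it might be consumed through the sctp_crypto_alloc_tfm() wrapper added in sctp.h; this helper is illustrative and not taken verbatim from the commit:

/* Hedged sketch, not from this commit: ask the crypto layer for the
 * transform named by SCTP_COOKIE_HMAC_ALG.  sctp_crypto_alloc_tfm()
 * collapses to NULL when CONFIG_CRYPTO_HMAC is disabled.
 */
static struct crypto_tfm *sctp_cookie_hmac_alloc(void)
{
    const char *alg = SCTP_COOKIE_HMAC_ALG;   /* "md5", "sha1" or NULL */
    struct crypto_tfm *tfm = NULL;

    if (alg)
        tfm = sctp_crypto_alloc_tfm(alg, 0);

    return tfm;   /* NULL means cookies go out without an HMAC */
}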
include/net/sctp/sctp.h

@@ -125,65 +125,61 @@ extern struct sctp_protocol sctp_proto;
 extern struct sock *sctp_get_ctl_sock(void);
 extern int sctp_copy_local_addr_list(struct sctp_protocol *, struct sctp_bind_addr *,
-                                     sctp_scope_t, int priority, int flags);
+                                     sctp_scope_t, int gfp, int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);

 /*
  * sctp/socket.c
  */
-extern int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-extern int sctp_inet_listen(struct socket *sock, int backlog);
-extern void sctp_write_space(struct sock *sk);
-extern unsigned int sctp_poll(struct file *file, struct socket *sock,
+int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int sctp_inet_listen(struct socket *sock, int backlog);
+void sctp_write_space(struct sock *sk);
+unsigned int sctp_poll(struct file *file, struct socket *sock,
                        poll_table *wait);

 /*
  * sctp/primitive.c
  */
-extern int sctp_primitive_ASSOCIATE(sctp_association_t *, void *arg);
-extern int sctp_primitive_SHUTDOWN(sctp_association_t *, void *arg);
-extern int sctp_primitive_ABORT(sctp_association_t *, void *arg);
-extern int sctp_primitive_SEND(sctp_association_t *, void *arg);
-extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);
+int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg);

 /*
  * sctp/crc32c.c
  */
-extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
-extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
-extern __u32 sctp_end_cksum(__u32 cksum);
+__u32 sctp_start_cksum(__u8 *ptr, __u16 count);
+__u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
+__u32 sctp_end_cksum(__u32 cksum);
+__u32 sctp_update_copy_cksum(__u8 *, __u8 *, __u16 count, __u32 cksum);

 /*
  * sctp/input.c
  */
-extern int sctp_rcv(struct sk_buff *skb);
-extern void sctp_v4_err(struct sk_buff *skb, u32 info);
-extern void sctp_hash_established(sctp_association_t *);
-extern void __sctp_hash_established(sctp_association_t *);
-extern void sctp_unhash_established(sctp_association_t *);
-extern void __sctp_unhash_established(sctp_association_t *);
-extern void sctp_hash_endpoint(sctp_endpoint_t *);
-extern void __sctp_hash_endpoint(sctp_endpoint_t *);
-extern void sctp_unhash_endpoint(sctp_endpoint_t *);
-extern void __sctp_unhash_endpoint(sctp_endpoint_t *);
-extern sctp_association_t *__sctp_lookup_association(const union sctp_addr *, const union sctp_addr *, struct sctp_transport **);
-extern struct sock *sctp_err_lookup(int family, struct sk_buff *, struct sctphdr *, struct sctp_endpoint **, struct sctp_association **, struct sctp_transport **);
-extern void sctp_err_finish(struct sock *, struct sctp_endpoint *, struct sctp_association *);
-extern void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, struct sctp_transport *t, __u32 pmtu);
-
-/*
- * sctp/hashdriver.c
- */
-extern void sctp_hash_digest(const char *secret, const int secret_len, const char *text, const int text_len, __u8 *digest);
+int sctp_rcv(struct sk_buff *skb);
+void sctp_v4_err(struct sk_buff *skb, u32 info);
+void sctp_hash_established(struct sctp_association *);
+void __sctp_hash_established(struct sctp_association *);
+void sctp_unhash_established(struct sctp_association *);
+void __sctp_unhash_established(struct sctp_association *);
+void sctp_hash_endpoint(struct sctp_endpoint *);
+void __sctp_hash_endpoint(struct sctp_endpoint *);
+void sctp_unhash_endpoint(struct sctp_endpoint *);
+void __sctp_unhash_endpoint(struct sctp_endpoint *);
+struct sctp_association *__sctp_lookup_association(const union sctp_addr *, const union sctp_addr *, struct sctp_transport **);
+struct sock *sctp_err_lookup(int family, struct sk_buff *, struct sctphdr *, struct sctp_endpoint **, struct sctp_association **, struct sctp_transport **);
+void sctp_err_finish(struct sock *, struct sctp_endpoint *, struct sctp_association *);
+void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, struct sctp_transport *t, __u32 pmtu);

 /*
  * Section:  Macros, externs, and inlines

@@ -281,6 +277,7 @@ extern atomic_t sctp_dbg_objcnt_chunk;
 extern atomic_t sctp_dbg_objcnt_bind_addr;
 extern atomic_t sctp_dbg_objcnt_addr;
 extern atomic_t sctp_dbg_objcnt_ssnmap;
+extern atomic_t sctp_dbg_objcnt_datamsg;

 /* Macros to atomically increment/decrement objcnt counters. */
 #define SCTP_DBG_OBJCNT_INC(name) \

@@ -296,8 +293,8 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
 #define SCTP_DBG_OBJCNT_ENTRY(name) \
 {.label= #name, .counter= &sctp_dbg_objcnt_## name}

-extern void sctp_dbg_objcnt_init(void);
-extern void sctp_dbg_objcnt_exit(void);
+void sctp_dbg_objcnt_init(void);
+void sctp_dbg_objcnt_exit(void);

 #else

@@ -310,8 +307,8 @@ static inline void sctp_dbg_objcnt_exit(void) { return; }
 #endif /* CONFIG_SCTP_DBG_OBJCOUNT */

 #if defined CONFIG_SYSCTL
-extern void sctp_sysctl_register(void);
-extern void sctp_sysctl_unregister(void);
+void sctp_sysctl_register(void);
+void sctp_sysctl_unregister(void);
 #else
 static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }

@@ -322,9 +319,9 @@ static inline void sctp_sysctl_unregister(void) { return; }
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-extern int sctp_v6_init(void);
-extern void sctp_v6_exit(void);
-extern void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+int sctp_v6_init(void);
+void sctp_v6_exit(void);
+void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                 int type, int code, int offset, __u32 info);
 #else /* #ifdef defined(CONFIG_IPV6) */

@@ -334,15 +331,26 @@ static inline void sctp_v6_exit(void) { return; }
 #endif /* #if defined(CONFIG_IPV6) */

+/* Some wrappers, in case crypto not available. */
+#if defined (CONFIG_CRYPTO_HMAC)
+#define sctp_crypto_alloc_tfm crypto_alloc_tfm
+#define sctp_crypto_free_tfm crypto_free_tfm
+#define sctp_crypto_hmac crypto_hmac
+#else
+#define sctp_crypto_alloc_tfm(x...) NULL
+#define sctp_crypto_free_tfm(x...)
+#define sctp_crypto_hmac(x...)
+#endif
+
 /* Map an association to an assoc_id. */
-static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)
+static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
 {
     return (sctp_assoc_t) asoc;
 }

 /* Look up the association by its id.  */
-sctp_association_t *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
+struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);

 /* A macro to walk a list of skbs.  */

@@ -422,12 +430,16 @@ static inline __s32 sctp_jitter(__u32 rto)
 }

 /* Break down data chunks at this point.  */
-static inline int sctp_frag_point(int pmtu)
+static inline int sctp_frag_point(const struct sctp_opt *sp, int pmtu)
 {
-    pmtu -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
-    pmtu -= sizeof(struct sctp_sack_chunk);
+    int frag = pmtu;
+
+    frag -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+    frag -= sizeof(struct sctp_sack_chunk);

-    return pmtu;
+    if (sp->user_frag)
+        frag = min_t(int, frag, sp->user_frag);
+
+    return frag;
 }

 /* Walk through a list of TLV parameters.  Don't trust the

@@ -475,7 +487,7 @@ static inline void tv_add(const struct timeval *entered, struct timeval *leaved)
 extern struct proto sctp_prot;
 extern struct proc_dir_entry *proc_net_sctp;
-extern void sctp_put_port(struct sock *sk);
+void sctp_put_port(struct sock *sk);

 /* Static inline functions. */

@@ -501,10 +513,10 @@ static inline int ipver2af(__u8 ipver)
 /* Perform some sanity checks. */
 static inline int sctp_sanity_check(void)
 {
-    SCTP_ASSERT(sizeof(struct sctp_ulpevent) <=
+    SCTP_ASSERT(sizeof(struct sctp_ulpevent) <=
             sizeof(((struct sk_buff *)0)->cb),
             "SCTP: ulpevent does not fit in skb!\n", return 0);
     return 1;
 }

@@ -566,4 +578,26 @@ struct sctp6_sock {
 #define sctp_sk(__sk) (&((struct sctp_sock *)__sk)->sctp)

+/* Is a socket of this style? */
+#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
+int static inline __sctp_style(const struct sock *sk, sctp_socket_type_t style)
+{
+    return sctp_sk(sk)->type == style;
+}
+
+/* Is the association in this state? */
+#define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state))
+int static inline __sctp_state(const struct sctp_association *asoc, sctp_state_t state)
+{
+    return asoc->state == state;
+}
+
+/* Is the socket in this state? */
+#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
+int static inline __sctp_sstate(const struct sock *sk, sctp_sock_state_t state)
+{
+    return sk->state == state;
+}
+
 #endif /* __net_sctp_h__ */
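The reworked sctp_frag_point() starts from the path MTU, subtracts the fixed IP/SCTP and DATA chunk overhead plus room for a bundled SACK, and then clamps the result to the user-requested limit in sp->user_frag (settable through the new SCTP_MAXSEG option). A standalone, user-space sketch of the same arithmetic with made-up overhead constants, just to illustrate the clamping; the real kernel values come from SCTP_IP_OVERHEAD and the chunk structures:

#include <stdio.h>

/* Illustrative constants only; not the kernel's actual sizes. */
#define EXAMPLE_IP_SCTP_OVERHEAD  52   /* assumed IPv4 + SCTP common header */
#define EXAMPLE_DATA_CHUNK_HDR    16
#define EXAMPLE_SACK_CHUNK        28

static int example_frag_point(int user_frag, int pmtu)
{
    int frag = pmtu;

    frag -= EXAMPLE_IP_SCTP_OVERHEAD + EXAMPLE_DATA_CHUNK_HDR;
    frag -= EXAMPLE_SACK_CHUNK;

    if (user_frag)          /* 0 means "no user-imposed limit" */
        frag = frag < user_frag ? frag : user_frag;

    return frag;
}

int main(void)
{
    /* With a 1500-byte PMTU and no user limit the whole remainder is
     * usable; with SCTP_MAXSEG set to 512 the user limit wins.
     */
    printf("%d\n", example_frag_point(0, 1500));
    printf("%d\n", example_frag_point(512, 1500));
    return 0;
}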
include/net/sctp/sm.h

@@ -6,10 +6,6 @@
  *
  * This file is part of the SCTP kernel reference Implementation
  *
- * This file is part of the implementation of the add-IP extension,
- * based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001,
- * for the SCTP kernel reference Implementation.
- *
  * These are definitions needed by the state machine.
  *
  * The SCTP reference implementation is free software;

@@ -50,7 +46,6 @@
  * be incorporated into the next SCTP release.
  */

 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/slab.h>

@@ -81,8 +76,8 @@ typedef struct {
     int action;
 } sctp_sm_command_t;

-typedef sctp_disposition_t (sctp_state_fn_t) (const sctp_endpoint_t *,
-                                              const sctp_association_t *,
+typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *,
+                                              const struct sctp_association *,
                                               const sctp_subtype_t type,
                                               void *arg,
                                               sctp_cmd_seq_t *);

@@ -209,109 +204,104 @@ __u32 sctp_generate_verification_tag(void);
 void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);

 /* Prototypes for chunk-building functions. */
-sctp_chunk_t *sctp_make_init(const sctp_association_t *, const sctp_bind_addr_t *, int priority, int vparam_len);
-sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *, const sctp_chunk_t *, const int priority, const int unkparam_len);
-sctp_chunk_t *sctp_make_cookie_echo(const sctp_association_t *, const sctp_chunk_t *);
-sctp_chunk_t *sctp_make_cookie_ack(const sctp_association_t *, const sctp_chunk_t *);
-sctp_chunk_t *sctp_make_cwr(const sctp_association_t *, const __u32 lowest_tsn, const sctp_chunk_t *);
-sctp_chunk_t *sctp_make_datafrag(sctp_association_t *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 *data, __u8 flags, __u16 ssn);
-sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 flags, __u16 ssn);
-sctp_chunk_t *sctp_make_data(sctp_association_t *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 *data);
-sctp_chunk_t *sctp_make_data_empty(sctp_association_t *, const struct sctp_sndrcvinfo *, int len);
-sctp_chunk_t *sctp_make_ecne(const sctp_association_t *, const __u32);
-sctp_chunk_t *sctp_make_sack(const sctp_association_t *);
-sctp_chunk_t *sctp_make_shutdown(const sctp_association_t *asoc);
-sctp_chunk_t *sctp_make_shutdown_ack(const sctp_association_t *asoc, const sctp_chunk_t *);
-sctp_chunk_t *sctp_make_shutdown_complete(const sctp_association_t *, const sctp_chunk_t *);
-void sctp_init_cause(sctp_chunk_t *, __u16 cause, const void *, size_t);
-sctp_chunk_t *sctp_make_abort(const sctp_association_t *, const sctp_chunk_t *, const size_t hint);
-sctp_chunk_t *sctp_make_abort_no_data(const sctp_association_t *, const sctp_chunk_t *, __u32 tsn);
-sctp_chunk_t *sctp_make_abort_user(const sctp_association_t *, const sctp_chunk_t *, const struct msghdr *);
-sctp_chunk_t *sctp_make_heartbeat(const sctp_association_t *, const struct sctp_transport *, const void *payload, const size_t paylen);
-sctp_chunk_t *sctp_make_heartbeat_ack(const sctp_association_t *, const sctp_chunk_t *, const void *payload, const size_t paylen);
-sctp_chunk_t *sctp_make_op_error(const sctp_association_t *, const sctp_chunk_t *chunk, __u16 cause_code, const void *payload, size_t paylen);
-void sctp_chunk_assign_tsn(sctp_chunk_t *);
-void sctp_chunk_assign_ssn(sctp_chunk_t *);
-int sctp_datachunks_from_user(sctp_association_t *, const struct sctp_sndrcvinfo *, struct msghdr *, int len, struct sk_buff_head *);
+struct sctp_chunk *sctp_make_init(const struct sctp_association *, const struct sctp_bind_addr *, int gfp, int vparam_len);
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *, const struct sctp_chunk *, const int gfp, const int unkparam_len);
+struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *, const struct sctp_chunk *);
+struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *, const struct sctp_chunk *);
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *, const __u32 lowest_tsn, const struct sctp_chunk *);
+struct sctp_chunk *sctp_make_datafrag(struct sctp_association *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 *data, __u8 flags, __u16 ssn);
+struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 flags, __u16 ssn);
+struct sctp_chunk *sctp_make_data(struct sctp_association *, const struct sctp_sndrcvinfo *sinfo, int len, const __u8 *data);
+struct sctp_chunk *sctp_make_data_empty(struct sctp_association *, const struct sctp_sndrcvinfo *, int len);
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *, const __u32);
+struct sctp_chunk *sctp_make_sack(const struct sctp_association *);
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, const struct sctp_chunk *);
+struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *, const struct sctp_chunk *);
+void sctp_init_cause(struct sctp_chunk *, __u16 cause, const void *, size_t);
+struct sctp_chunk *sctp_make_abort(const struct sctp_association *, const struct sctp_chunk *, const size_t hint);
+struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn);
+struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, const struct sctp_chunk *, const struct msghdr *);
+struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *, const struct sctp_transport *, const void *payload, const size_t paylen);
+struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *, const struct sctp_chunk *, const void *payload, const size_t paylen);
+struct sctp_chunk *sctp_make_op_error(const struct sctp_association *, const struct sctp_chunk *chunk, __u16 cause_code, const void *payload, size_t paylen);
+void sctp_chunk_assign_tsn(struct sctp_chunk *);
+void sctp_chunk_assign_ssn(struct sctp_chunk *);

 /* Prototypes for statetable processing. */

-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, sctp_endpoint_t *, sctp_association_t *asoc, void *event_arg, int priority);
+int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *, struct sctp_association *asoc, void *event_arg, int gfp);

-int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, sctp_endpoint_t *, sctp_association_t *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, int priority);
+int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, int gfp);

 /* 2nd level prototypes */
-int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, sctp_endpoint_t *ep, sctp_association_t *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *retval, int priority);
+int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *retval, int gfp);

-int sctp_gen_sack(sctp_association_t *, int force, sctp_cmd_seq_t *);
-void sctp_do_TSNdup(sctp_association_t *, sctp_chunk_t *, long gap);
+int sctp_gen_sack(struct sctp_association *, int force, sctp_cmd_seq_t *);
+void sctp_do_TSNdup(struct sctp_association *, struct sctp_chunk *, long gap);

 void sctp_generate_t3_rtx_event(unsigned long peer);
 void sctp_generate_heartbeat_event(unsigned long peer);

-sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *);
+sctp_sackhdr_t *sctp_sm_pull_sack(struct sctp_chunk *);
 struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *, const struct sctp_association *, struct sctp_chunk *chunk,

@@ -325,21 +315,21 @@ sctp_cookie_param_t *
 sctp_pack_cookie(const struct sctp_endpoint *, const struct sctp_association *, const struct sctp_chunk *, int *cookie_len, const __u8 *, int addrs_len);
-sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *, const sctp_association_t *, sctp_chunk_t *, int priority, int *err, sctp_chunk_t **err_chk_p);
+struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *, const struct sctp_association *, struct sctp_chunk *, int gfp, int *err, struct sctp_chunk **err_chk_p);
-int sctp_addip_addr_config(sctp_association_t *, sctp_param_t, struct sockaddr_storage *, int);
+int sctp_addip_addr_config(struct sctp_association *, sctp_param_t, struct sockaddr_storage *, int);
-void sctp_send_stale_cookie_err(const sctp_endpoint_t *ep, const sctp_association_t *asoc, const sctp_chunk_t *chunk, sctp_cmd_seq_t *commands, sctp_chunk_t *err_chunk);
+void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *chunk, sctp_cmd_seq_t *commands, struct sctp_chunk *err_chunk);

 /* 3rd level prototypes */
-__u32 sctp_generate_tag(const sctp_endpoint_t *);
-__u32 sctp_generate_tsn(const sctp_endpoint_t *);
+__u32 sctp_generate_tag(const struct sctp_endpoint *);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *);

 /* 4th level prototypes */
 void sctp_param2sockaddr(union sctp_addr *addr, sctp_addr_param_t *,

@@ -361,7 +351,7 @@ extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
 /* Get the size of a DATA chunk payload. */
-static inline __u16 sctp_data_size(sctp_chunk_t *chunk)
+static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
 {
     __u16 size;

@@ -449,8 +439,8 @@ static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_a
  * tag and the T bit is set in the Chunk Flags.
  */
 static inline int
-sctp_vtag_verify_either(const sctp_chunk_t *chunk, const sctp_association_t *asoc)
+sctp_vtag_verify_either(const struct sctp_chunk *chunk, const struct sctp_association *asoc)
 {
     /* RFC 2960 Section 8.5.1, sctpimpguide-06 Section 2.13.2
      *
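Every state-machine handler shares the sctp_state_fn_t signature shown above, now written in terms of struct sctp_endpoint and struct sctp_association. A hedged sketch of what a trivial handler matching that typedef could look like; the function name and body are illustrative, while the types and SCTP_DISPOSITION_CONSUME come from the surrounding SCTP code:

/* Illustrative only: a do-nothing state function with the sctp_state_fn_t
 * signature.  Real handlers queue side effects on the sctp_cmd_seq_t with
 * sctp_add_cmd_sf() and return an sctp_disposition_t.
 */
static sctp_disposition_t sctp_sf_example_noop(const struct sctp_endpoint *ep,
                                               const struct sctp_association *asoc,
                                               const sctp_subtype_t type,
                                               void *arg,
                                               sctp_cmd_seq_t *commands)
{
    /* Consume the event without generating any side effects. */
    return SCTP_DISPOSITION_CONSUME;
}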
include/net/sctp/structs.h

@@ -70,7 +70,6 @@ union sctp_addr {
     struct sockaddr sa;
 };

 /* Forward declarations for data structures. */
-struct sctp_protocol;
 struct sctp_endpoint;

@@ -83,14 +82,9 @@ struct sctp_outq;
 struct sctp_bind_addr;
 struct sctp_ulpq;
 struct sctp_opt;
-struct sctp_endpoint_common;
+struct sctp_ep_common;
 struct sctp_ssnmap;

-typedef struct sctp_endpoint sctp_endpoint_t;
-typedef struct sctp_association sctp_association_t;
-typedef struct sctp_chunk sctp_chunk_t;
-typedef struct sctp_bind_addr sctp_bind_addr_t;
-typedef struct sctp_endpoint_common sctp_endpoint_common_t;

 #include <net/sctp/tsnmap.h>
 #include <net/sctp/ulpevent.h>

@@ -114,7 +108,7 @@ typedef struct sctp_bind_hashbucket {
 /* Used for hashing all associations.  */
 typedef struct sctp_hashbucket {
     rwlock_t lock;
-    sctp_endpoint_common_t *chain;
+    struct sctp_ep_common *chain;
 } sctp_hashbucket_t __attribute__((__aligned__(8)));

@@ -235,7 +229,9 @@ struct sctp_af {
                  int saddr);
     void    (*from_sk)      (union sctp_addr *, struct sock *sk);
-    void    (*to_sk)        (union sctp_addr *, struct sock *sk);
+    void    (*to_sk_saddr)  (union sctp_addr *, struct sock *sk);
+    void    (*to_sk_daddr)  (union sctp_addr *, struct sock *sk);
     int     (*addr_valid)   (union sctp_addr *);
     sctp_scope_t (*scope)   (union sctp_addr *);

@@ -243,6 +239,7 @@ struct sctp_af {
     int     (*is_any)       (const union sctp_addr *);
     int     (*available)    (const union sctp_addr *);
     int     (*skb_iif)      (const struct sk_buff *sk);
+    int     (*is_ce)        (const struct sk_buff *sk);
     __u16       net_header_len;
     int         sockaddr_len;
     sa_family_t sa_family;

@@ -283,8 +280,11 @@ struct sctp_opt {
     /* PF_ family specific functions.  */
     struct sctp_pf *pf;

+    /* Access to HMAC transform. */
+    struct crypto_tfm *hmac;
+
     /* What is our base endpointer? */
-    sctp_endpoint_t *ep;
+    struct sctp_endpoint *ep;

     /* Various Socket Options.  */
     __u16 default_stream;

@@ -293,10 +293,12 @@ struct sctp_opt {
     struct sctp_rtoinfo rtoinfo;
     struct sctp_paddrparams paddrparam;
     struct sctp_event_subscribe subscribe;
+    int user_frag;
     __u32 autoclose;
     __u8 nodelay;
     __u8 disable_fragments;
     __u8 pd_mode;
+    __u8 v4mapped;

     /* Receive to here while partial delivery is in effect. */
     struct sk_buff_head pd_lobby;

@@ -448,6 +450,37 @@ static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
     return stream->ssn[id]++;
 }

+/* Structure to track chunk fragments that have been acked, but peer
+ * fragments of the same message have not.
+ */
+struct sctp_datamsg {
+    /* Chunks waiting to be submitted to lower layer. */
+    struct list_head chunks;
+    /* Chunks that have been transmitted. */
+    struct list_head track;
+    /* Reference counting. */
+    atomic_t refcnt;
+    /* When is this message no longer interesting to the peer? */
+    unsigned long expires_at;
+    /* Did the messenge fail to send? */
+    int send_error;
+    char send_failed;
+    /* Control whether fragments from this message can expire. */
+    char can_expire;
+};
+
+struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, struct sctp_sndrcvinfo *, struct msghdr *, int len);
+struct sctp_datamsg *sctp_datamsg_new(int gfp);
+void sctp_datamsg_put(struct sctp_datamsg *);
+void sctp_datamsg_hold(struct sctp_datamsg *);
+void sctp_datamsg_free(struct sctp_datamsg *);
+void sctp_datamsg_track(struct sctp_chunk *);
+void sctp_datamsg_assign(struct sctp_datamsg *, struct sctp_chunk *);
+void sctp_datamsg_fail(struct sctp_chunk *, int error);
+int sctp_datamsg_expires(struct sctp_chunk *);
+
 /* RFC2960 1.4 Key Terms
  *

@@ -462,9 +495,10 @@ struct sctp_chunk {
      * three elements of struct sk_buff.  This allows us to reuse
      * all the skb_* queue management functions.
      */
-    sctp_chunk_t *next;
-    sctp_chunk_t *prev;
+    struct sctp_chunk *next;
+    struct sctp_chunk *prev;
     struct sk_buff_head *list;
+    atomic_t refcnt;

     /* This is our link to the per-transport transmitted list. */
     struct list_head transmitted_list;

@@ -514,43 +548,52 @@ struct sctp_chunk {
     struct sctp_association *asoc;

     /* What endpoint received this chunk? */
-    sctp_endpoint_common_t *rcvr;
+    struct sctp_ep_common *rcvr;

     /* We fill this in if we are calculating RTT. */
     unsigned long sent_at;

-    __u8 rtt_in_progress;    /* Is this chunk used for RTT calculation? */
-    __u8 num_times_sent;     /* How man times did we send this? */
-    __u8 has_tsn;            /* Does this chunk have a TSN yet? */
-    __u8 has_ssn;            /* Does this chunk have a SSN yet? */
-    __u8 singleton;          /* Was this the only chunk in the packet? */
-    __u8 end_of_packet;      /* Was this the last chunk in the packet? */
-    __u8 ecn_ce_done;        /* Have we processed the ECN CE bit? */
-    __u8 pdiscard;           /* Discard the whole packet now? */
-    __u8 tsn_gap_acked;      /* Is this chunk acked by a GAP ACK? */
-    __u8 fast_retransmit;    /* Is this chunk fast retransmitted? */
-    __u8 tsn_missing_report; /* Data chunk missing counter. */
-
     /* What is the origin IP address for this chunk?  */
     union sctp_addr source;
     /* Destination address for this chunk. */
     union sctp_addr dest;

+    /* For outbound message, track all fragments for SEND_FAILED. */
+    struct sctp_datamsg *msg;
+
     /* For an inbound chunk, this tells us where it came from.
      * For an outbound chunk, it tells us where we'd like it to
      * go.  It is NULL if we have no preference.
      */
     struct sctp_transport *transport;
+
+    __u8 rtt_in_progress;    /* Is this chunk used for RTT calculation? */
+    __u8 resent;             /* Has this chunk ever been retransmitted. */
+    __u8 has_tsn;            /* Does this chunk have a TSN yet? */
+    __u8 has_ssn;            /* Does this chunk have a SSN yet? */
+    __u8 singleton;          /* Was this the only chunk in the packet? */
+    __u8 end_of_packet;      /* Was this the last chunk in the packet? */
+    __u8 ecn_ce_done;        /* Have we processed the ECN CE bit? */
+    __u8 pdiscard;           /* Discard the whole packet now? */
+    __u8 tsn_gap_acked;      /* Is this chunk acked by a GAP ACK? */
+    __u8 fast_retransmit;    /* Is this chunk fast retransmitted? */
+    __u8 tsn_missing_report; /* Data chunk missing counter. */
 };

-sctp_chunk_t *sctp_make_chunk(const struct sctp_association *, __u8 type, __u8 flags, int size);
-void sctp_free_chunk(sctp_chunk_t *);
-void *sctp_addto_chunk(sctp_chunk_t *chunk, int len, const void *data);
-sctp_chunk_t *sctp_chunkify(struct sk_buff *, const struct sctp_association *, struct sock *);
-void sctp_init_addrs(sctp_chunk_t *, union sctp_addr *, union sctp_addr *);
-const union sctp_addr *sctp_source(const sctp_chunk_t *chunk);
+void sctp_chunk_hold(struct sctp_chunk *);
+void sctp_chunk_put(struct sctp_chunk *);
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, struct iovec *data);
+struct sctp_chunk *sctp_make_chunk(const struct sctp_association *, __u8 type, __u8 flags, int size);
+void sctp_chunk_free(struct sctp_chunk *);
+void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+struct sctp_chunk *sctp_chunkify(struct sk_buff *, const struct sctp_association *, struct sock *);
+void sctp_init_addrs(struct sctp_chunk *, union sctp_addr *, union sctp_addr *);
+const union sctp_addr *sctp_source(const struct sctp_chunk *chunk);

 /* This is a structure for holding either an IPv6 or an IPv4 address. */
 /* sin_family -- AF_INET or AF_INET6

@@ -562,7 +605,7 @@ struct sockaddr_storage_list {
     union sctp_addr a;
 };

-typedef sctp_chunk_t *(sctp_packet_phandler_t)(struct sctp_association *);
+typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *);

 /* This structure holds lists of chunks as we are assembling for
  * transmission.

@@ -619,7 +662,7 @@ typedef struct sctp_packet *(sctp_outq_ohandler_config_t)
              int ecn_capable,
              sctp_packet_phandler_t *get_prepend_chunk);
 typedef sctp_xmit_t (sctp_outq_ohandler_t)(struct sctp_packet *,
-                                           sctp_chunk_t *);
+                                           struct sctp_chunk *);
 typedef int (sctp_outq_ohandler_force_t)(struct sctp_packet *);

 sctp_outq_ohandler_init_t sctp_packet_init;

@@ -697,7 +740,6 @@ struct sctp_transport {
      */
     int rto_pending;

-
     /*
      * These are the congestion stats.
      */

@@ -771,9 +813,6 @@ struct sctp_transport {
      */
     int max_retrans;

-    /* We use this name for debugging output... */
-    char *debug_name;
-
     /* Per         : A timer used by each destination.
      * Destination :
      * Timer       :

@@ -799,6 +838,35 @@ struct sctp_transport {
     struct list_head send_ready;

     int malloced; /* Is this structure kfree()able? */
+
+    /* State information saved for SFR_CACC algorithm. The key
+     * idea in SFR_CACC is to maintain state at the sender on a
+     * per-destination basis when a changeover happens.
+     *  char changeover_active;
+     *  char cycling_changeover;
+     *  __u32 next_tsn_at_change;
+     *  char cacc_saw_newack;
+     */
+    struct {
+        /* An unsigned integer, which stores the next TSN to be
+         * used by the sender, at the moment of changeover.
+         */
+        __u32 next_tsn_at_change;
+
+        /* A flag which indicates the occurrence of a changeover */
+        char changeover_active;
+
+        /* A glag which indicates whether the change of primary is
+         * the first switch to this destination address during an
+         * active switch.
+         */
+        char cycling_changeover;
+
+        /* A temporary flag, which is used during the processing of
+         * a SACK to estimate the causative TSN(s)'s group.
+         */
+        char cacc_saw_newack;
+    } cacc;
 };

 struct sctp_transport *sctp_transport_new(const union sctp_addr *, int);

@@ -831,7 +899,7 @@ struct sctp_inq {
     /* This is the packet which is currently off the in queue and is
      * being worked on through the inbound chunk processing.
      */
-    sctp_chunk_t *in_progress;
+    struct sctp_chunk *in_progress;

     /* This is the delayed task to finish delivering inbound
      * messages.

@@ -844,7 +912,7 @@ struct sctp_inq {
 struct sctp_inq *sctp_inq_new(void);
 void sctp_inq_init(struct sctp_inq *);
 void sctp_inq_free(struct sctp_inq *);
-void sctp_inq_push(struct sctp_inq *, sctp_chunk_t *packet);
+void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
 struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
 void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);

@@ -904,18 +972,21 @@ struct sctp_outq {
     /* How many unackd bytes do we have in-flight?  */
     __u32 outstanding_bytes;

+    /* Corked? */
+    char cork;
+
     /* Is this structure empty?  */
-    int empty;
+    char empty;

     /* Are we kfree()able? */
-    int malloced;
+    char malloced;
 };

 struct sctp_outq *sctp_outq_new(struct sctp_association *);
 void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
 void sctp_outq_teardown(struct sctp_outq *);
 void sctp_outq_free(struct sctp_outq *);
-int sctp_outq_tail(struct sctp_outq *, sctp_chunk_t *chunk);
+int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk);
 int sctp_outq_flush(struct sctp_outq *, int);
 int sctp_outq_sack(struct sctp_outq *, sctp_sackhdr_t *);
 int sctp_outq_is_empty(const struct sctp_outq *);

@@ -926,10 +997,16 @@ int sctp_outq_set_output_handlers(struct sctp_outq *,
                   sctp_outq_ohandler_t build,
                   sctp_outq_ohandler_force_t force);
 void sctp_outq_restart(struct sctp_outq *);
 void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
              sctp_retransmit_reason_t);
 void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
+int sctp_outq_uncork(struct sctp_outq *);
+
+/* Uncork and flush an outqueue.  */
+static inline void sctp_outq_cork(struct sctp_outq *q)
+{
+    q->cork = 1;
+}

 /* These bind address data fields common between endpoints and associations */
 struct sctp_bind_addr {

@@ -952,15 +1029,16 @@ struct sctp_bind_addr {
     int malloced;        /* Are we kfree()able? */
 };

-sctp_bind_addr_t *sctp_bind_addr_new(int gfp_mask);
-void sctp_bind_addr_init(sctp_bind_addr_t *, __u16 port);
-void sctp_bind_addr_free(sctp_bind_addr_t *);
-int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
+struct sctp_bind_addr *sctp_bind_addr_new(int gfp_mask);
+void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
+void sctp_bind_addr_free(struct sctp_bind_addr *);
+int sctp_bind_addr_copy(struct sctp_bind_addr *dest, const struct sctp_bind_addr *src,
             sctp_scope_t scope, int gfp, int flags);
-int sctp_add_bind_addr(sctp_bind_addr_t *, union sctp_addr *,
+int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
                int gfp);
-int sctp_del_bind_addr(sctp_bind_addr_t *, union sctp_addr *);
-int sctp_bind_addr_match(sctp_bind_addr_t *, const union sctp_addr *,
+int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
+int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
              struct sctp_opt *);
 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
                      int *addrs_len, int gfp);

@@ -973,7 +1051,7 @@ int sctp_is_any(const union sctp_addr *addr);
 int sctp_addr_is_valid(const union sctp_addr *addr);

-/* What type of sctp_endpoint_common?  */
+/* What type of endpoint?  */
 typedef enum {
     SCTP_EP_TYPE_SOCKET,
     SCTP_EP_TYPE_ASSOCIATION,

@@ -995,10 +1073,10 @@ typedef enum {
  *
  */
-struct sctp_endpoint_common {
+struct sctp_ep_common {
     /* Fields to help us manage our entries in the hash tables. */
-    sctp_endpoint_common_t *next;
-    sctp_endpoint_common_t **pprev;
+    struct sctp_ep_common *next;
+    struct sctp_ep_common **pprev;
     int hashent;

     /* Runtime type information.  What kind of endpoint is this? */

@@ -1024,7 +1102,7 @@ struct sctp_endpoint_common {
      * bind_addr.port is our shared port number.
      * bind_addr.address_list is our set of local IP addresses.
      */
-    sctp_bind_addr_t bind_addr;
+    struct sctp_bind_addr bind_addr;

     /* Protection during address list comparisons. */
     rwlock_t addr_lock;

@@ -1052,12 +1130,7 @@ struct sctp_endpoint_common {
 struct sctp_endpoint {
     /* Common substructure for endpoint and association. */
-    sctp_endpoint_common_t base;
-
-    /* These are the system-wide defaults and other stuff which is
-     * endpoint-independent.
-     */
-    struct sctp_protocol *proto;
+    struct sctp_ep_common base;

     /* Associations: A list of current associations and mappings
      * to the data consumers for each association. This

@@ -1092,28 +1165,29 @@ struct sctp_endpoint {
 };

 /* Recover the outter endpoint structure. */
-static inline sctp_endpoint_t *sctp_ep(sctp_endpoint_common_t *base)
+static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
 {
-    sctp_endpoint_t *ep;
+    struct sctp_endpoint *ep;

-    ep = container_of(base, sctp_endpoint_t, base);
+    ep = container_of(base, struct sctp_endpoint, base);
     return ep;
 }

 /* These are function signatures for manipulating endpoints.  */
-sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *, struct sock *, int);
-sctp_endpoint_t *sctp_endpoint_init(struct sctp_endpoint *, struct sctp_protocol *, struct sock *, int gfp);
-void sctp_endpoint_free(sctp_endpoint_t *);
-void sctp_endpoint_put(sctp_endpoint_t *);
-void sctp_endpoint_hold(sctp_endpoint_t *);
-void sctp_endpoint_add_asoc(sctp_endpoint_t *, struct sctp_association *asoc);
-struct sctp_association *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep, const union sctp_addr *paddr, struct sctp_transport **);
-int sctp_endpoint_is_peeled_off(sctp_endpoint_t *, const union sctp_addr *);
-sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *,
+struct sctp_endpoint *sctp_endpoint_new(struct sock *, int);
+struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *, struct sock *, int gfp);
+void sctp_endpoint_free(struct sctp_endpoint *);
+void sctp_endpoint_put(struct sctp_endpoint *);
+void sctp_endpoint_hold(struct sctp_endpoint *);
+void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
+struct sctp_association *sctp_endpoint_lookup_assoc(const struct sctp_endpoint *ep, const union sctp_addr *paddr, struct sctp_transport **);
+int sctp_endpoint_is_peeled_off(struct sctp_endpoint *, const union sctp_addr *);
+struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
                     const union sctp_addr *);
 int sctp_has_association(const union sctp_addr *laddr,
              const union sctp_addr *paddr);

@@ -1126,8 +1200,8 @@ int sctp_process_init(struct sctp_association *, sctp_cid_t cid,
               sctp_init_chunk_t *init, int gfp);
 int sctp_process_param(struct sctp_association *, union sctp_params param,
                const union sctp_addr *from, int gfp);
-__u32 sctp_generate_tag(const sctp_endpoint_t *);
-__u32 sctp_generate_tsn(const sctp_endpoint_t *);
+__u32 sctp_generate_tag(const struct sctp_endpoint *);
+__u32 sctp_generate_tsn(const struct sctp_endpoint *);

 /* RFC2960

@@ -1150,7 +1224,7 @@ struct sctp_association {
      * In this context, it represents the associations's view
      * of the local endpoint of the association.
      */
-    sctp_endpoint_common_t base;
+    struct sctp_ep_common base;

     /* Associations on the same socket. */
     struct list_head asocs;

@@ -1162,7 +1236,7 @@ struct sctp_association {
     __u32 eyecatcher;

     /* This is our parent endpoint. */
-    sctp_endpoint_t *ep;
+    struct sctp_endpoint *ep;

     /* These are those association elements needed in the cookie. */
     sctp_cookie_t c;

@@ -1337,7 +1411,6 @@ struct sctp_association {
     /* The largest timeout or RTO value to use in attempting an INIT */
     __u16 max_init_timeo;

-    int timeouts[SCTP_NUM_TIMEOUT_TYPES];
     struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];

@@ -1438,9 +1511,6 @@ struct sctp_association {
      */
     struct sctp_ulpq ulpq;

-    /* Need to send an ECNE Chunk? */
-    int need_ecne;
-
     /* Last TSN that caused an ECNE Chunk to be sent.  */
     __u32 last_ecne_tsn;

@@ -1453,9 +1523,6 @@ struct sctp_association {
     /* Number of seconds of idle time before an association is closed. */
     __u32 autoclose;

-    /* Name for debugging output... */
-    char *debug_name;
-
     /* These are to support
      * "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
      *  and Enforcement of Flow and Message Limits"

@@ -1463,8 +1530,7 @@ struct sctp_association {
      *  or "ADDIP" for short.
      */

-    /* Is the ADDIP extension enabled for this association? */
-    int addip_enable;

     /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
      *

@@ -1480,7 +1546,7 @@ struct sctp_association {
      * [This is our one-and-only-one ASCONF in flight.  If we do
      * not have an ASCONF in flight, this is NULL.]
      */
-    sctp_chunk_t *addip_last_asconf;
+    struct sctp_chunk *addip_last_asconf;

     /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.
      *

@@ -1495,7 +1561,7 @@ struct sctp_association {
      * [This is our saved ASCONF-ACK. We invalidate it when a new
      * ASCONF serial number arrives.]
      */
-    sctp_chunk_t *addip_last_asconf_ack;
+    struct sctp_chunk *addip_last_asconf_ack;

     /* These ASCONF chunks are waiting to be sent.
      *

@@ -1548,6 +1614,15 @@ struct sctp_association {
      * after reaching 4294967295.
      */
     __u32 addip_serial;
+
+    /* Is the ADDIP extension enabled for this association? */
+    char addip_enable;
+
+    /* Need to send an ECNE Chunk? */
+    char need_ecne;
+
+    /* Is it a temporary association? */
+    char temp;
 };

@@ -1559,7 +1634,7 @@ enum {
 };

 /* Recover the outter association structure. */
-static inline struct sctp_association *sctp_assoc(sctp_endpoint_common_t *base)
+static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
 {
     struct sctp_association *asoc;

@@ -1571,10 +1646,10 @@ static inline struct sctp_association *sctp_assoc(sctp_endpoint_common_t *base)
 struct sctp_association *
-sctp_association_new(const sctp_endpoint_t *, const struct sock *,
+sctp_association_new(const struct sctp_endpoint *, const struct sock *,
              sctp_scope_t scope, int gfp);
 struct sctp_association *
-sctp_association_init(struct sctp_association *, const sctp_endpoint_t *,
+sctp_association_init(struct sctp_association *, const struct sctp_endpoint *,
               const struct sock *, sctp_scope_t scope, int gfp);
 void sctp_association_free(struct sctp_association *);

@@ -1614,8 +1689,8 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
             const union sctp_addr *ss2);
-sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc);
-sctp_chunk_t *sctp_get_no_prepend(struct sctp_association *asoc);
+struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc);
+struct sctp_chunk *sctp_get_no_prepend(struct sctp_association *asoc);

 /* A convenience structure to parse out SCTP specific CMSGs. */
 typedef struct sctp_cmsgs {
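The new struct sctp_datamsg groups all fragments of one user message so that SEND_FAILED notifications and lifetime expiry can be handled per message rather than per chunk. A hedged sketch of the intended reference-counting pattern, inferred only from the prototypes declared above and not copied from this commit:

/* Illustrative use of the sctp_datamsg API; error handling is minimal
 * and the queueing step is elided.
 */
static int example_send_msg(struct sctp_association *asoc,
                            struct sctp_sndrcvinfo *sinfo,
                            struct msghdr *msgh, int len)
{
    struct sctp_datamsg *msg;

    /* Fragment the user data into chunks tied to one datamsg. */
    msg = sctp_datamsg_from_user(asoc, sinfo, msgh, len);
    if (!msg)
        return -ENOMEM;

    /* Chunks created above are linked on msg->chunks and each keeps its
     * own reference through chunk->msg, so the caller can drop its
     * reference once the chunks have been handed to the output queue.
     */
    /* ... queue the chunks on msg->chunks to the outqueue here ... */

    sctp_datamsg_put(msg);
    return 0;
}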
include/net/sctp/tsnmap.h

@@ -114,7 +114,7 @@ struct sctp_tsnmap_iter {
 };

 /* Create a new tsnmap. */
-struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 init_tsn, int priority);
+struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 init_tsn, int gfp);

 /* Dispose of a tsnmap. */
 void sctp_tsnmap_free(struct sctp_tsnmap *);
include/net/sctp/ulpevent.h

@@ -10,13 +10,15 @@
  * sctp_ulpevent type is used to carry information from the state machine
  * upwards to the ULP.
  *
+ * This file is part of the SCTP kernel reference Implementation
+ *
- * The SCTP reference implementation  is free software;
+ * The SCTP reference implementation is free software;
  * you can redistribute it and/or modify it under the terms of
  * the GNU General Public License as published by
  * the Free Software Foundation; either version 2, or (at your option)
  * any later version.
  *
- * the SCTP reference implementation is distributed in the hope that it
+ * The SCTP reference implementation is distributed in the hope that it
  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  *                 ************************
  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

@@ -27,12 +29,17 @@
  * the Free Software Foundation, 59 Temple Place - Suite 330,
  * Boston, MA 02111-1307, USA.
  *
- * Please send any bug reports or fixes you make to one of the
- * following email addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *
+ * Or submit a bug report through the following website:
+ *    http://www.sf.net/projects/lksctp
  *
- * Jon Grimm <jgrimm@us.ibm.com>
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
+ * Written or modified by:
+ *    Jon Grimm             <jgrimm@us.ibm.com>
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Karl Knutson          <karl@athena.chicago.il.us>
  *
  * Any bugs reported given to us we will try to fix... any fixes shared will
  * be incorporated into the next SCTP release.

@@ -64,7 +71,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
     return (struct sctp_ulpevent *)skb->cb;
 }

-struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int priority);
+struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int gfp);
 struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
 void sctp_ulpevent_free(struct sctp_ulpevent *);
 int sctp_ulpevent_is_notification(const struct sctp_ulpevent *);

@@ -76,7 +83,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
     __u16 error,
     __u16 outbound,
     __u16 inbound,
-    int priority);
+    int gfp);

 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
     const struct sctp_association *asoc,

@@ -84,32 +91,32 @@ struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
     int flags,
     int state,
     int error,
-    int priority);
+    int gfp);

 struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
     const struct sctp_association *asoc,
     struct sctp_chunk *chunk,
     __u16 flags,
-    int priority);
+    int gfp);
 struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
     const struct sctp_association *asoc,
     struct sctp_chunk *chunk,
     __u16 flags,
     __u32 error,
-    int priority);
+    int gfp);

 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
     const struct sctp_association *asoc,
     __u16 flags,
-    int priority);
+    int gfp);

 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
     const struct sctp_association *asoc,
-    __u32 indication, int priority);
+    __u32 indication, int gfp);

 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
     struct sctp_chunk *chunk,
-    int priority);
+    int gfp);

 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                    struct msghdr *);
include/net/sctp/ulpqueue.h

@@ -50,14 +50,15 @@
 struct sctp_ulpq {
     char malloced;
     char pd_mode;
-    sctp_association_t *asoc;
+    struct sctp_association *asoc;
     struct sk_buff_head reasm;
     struct sk_buff_head lobby;
 };

 /* Prototypes. */
-struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority);
-struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, sctp_association_t *);
+struct sctp_ulpq *sctp_ulpq_new(struct sctp_association *asoc, int gfp);
+struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, struct sctp_association *);
 void sctp_ulpq_free(struct sctp_ulpq *);

 /* Add a new DATA chunk for processing. */
include/net/sctp/user.h

@@ -110,6 +110,10 @@ enum sctp_optname {
 #define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS
     SCTP_NODELAY,   /* Get/set nodelay option. */
 #define SCTP_NODELAY SCTP_NODELAY
+    SCTP_I_WANT_MAPPED_V4_ADDR,  /* Turn on/off mapped v4 addresses  */
+#define SCTP_I_WANT_MAPPED_V4_ADDR SCTP_I_WANT_MAPPED_V4_ADDR
+    SCTP_MAXSEG,    /* Get/set maximum fragment. */
+#define SCTP_MAXSEG SCTP_MAXSEG
 };
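SCTP_MAXSEG and SCTP_I_WANT_MAPPED_V4_ADDR become visible to applications as IPPROTO_SCTP socket options. A small user-space sketch, assuming the constants are exported to userland (e.g. via <netinet/sctp.h> from lksctp-tools) and that both options take an int, as is usual for this API:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>   /* assumed to provide SCTP_MAXSEG, etc. */

int main(void)
{
    int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
    int maxseg = 1024;   /* cap outgoing DATA fragments at 1024 bytes */
    int mapped = 0;      /* turn off v4-mapped addresses */

    if (sd < 0) {
        perror("socket");
        return 1;
    }
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &maxseg, sizeof(maxseg)) < 0)
        perror("SCTP_MAXSEG");
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_I_WANT_MAPPED_V4_ADDR,
                   &mapped, sizeof(mapped)) < 0)
        perror("SCTP_I_WANT_MAPPED_V4_ADDR");
    return 0;
}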
net/sctp/Kconfig

@@ -43,12 +43,12 @@ config SCTP_ADLER32
 	bool "SCTP: Use old checksum (Adler-32)"
 	depends on IP_SCTP
 	help
-	  RCF2960 currently specifies the Adler-32 checksum algorithm for SCTP. 
+	  RCF2960 currently specifies the Adler-32 checksum algorithm for SCTP.
 	  This has been deprecated and replaced by an algorithm now referred
 	  to as crc32c.

-	  If you say Y, this will use the Adler-32 algorithm, this might be useful
-	  for interoperation with downlevel peers. 
+	  If you say Y, this will use the Adler-32 algorithm, this might be
+	  useful for interoperation with downlevel peers.

 	  If unsure, say N.

@@ -58,19 +58,46 @@ config SCTP_DBG_MSG
 	help
 	  If you say Y, this will enable verbose debugging messages.

-	  If unsure, say N.  However, if you are running into problems, use this
-	  option to gather detailed trace information
+	  If unsure, say N.  However, if you are running into problems, use
+	  this option to gather detailed trace information

 config SCTP_DBG_OBJCNT
 	bool "SCTP: Debug object counts"
 	depends on IP_SCTP
 	help
-	  If you say Y, this will enable debugging support for counting the types
-	  of objects that are currently allocated.  This is useful for identifying
-	  memory leaks. If the /proc filesystem is enabled this debug information
-	  can be viewed by 'cat /proc/net/sctp/sctp_dbg_objcnt'
+	  If you say Y, this will enable debugging support for counting the
+	  type of objects that are currently allocated.  This is useful for
+	  identifying memory leaks. If the /proc filesystem is enabled this
+	  debug information can be viewed by
+	  'cat /proc/net/sctp/sctp_dbg_objcnt'

 	  If unsure, say N

-endmenu
+choice
+	prompt "SCTP: Cookie HMAC Algorithm"
+	help
+	  HMAC algorithm to be used during association initialization.  It
+	  is strongly recommended to use HMAC-SHA1 or HMAC-MD5.  See
+	  configuration for Cryptographic API and enable those algorithms
+	  to make usable by SCTP.
+
+config SCTP_HMAC_NONE
+	bool "None"
+	help
+	  Choosing this disables the use of an HMAC during association
+	  establishment.  It is advised to use either HMAC-MD5 or HMAC-SHA1.
+
+config SCTP_HMAC_SHA1
+	bool "HMAC-SHA1" if CRYPTO_HMAC=y && CRYPTO_SHA1=y || CRYPTO_SHA1=m
+	help
+	  Enable the use of HMAC-SHA1 during association establishment.  It
+	  is advised to use either HMAC-MD5 or HMAC-SHA1.
+
+config SCTP_HMAC_MD5
+	bool "HMAC-MD5" if CRYPTO_HMAC=y && CRYPTO_MD5=y || CRYPTO_MD5=m
+	help
+	  Enable the use of HMAC-MD5 during association establishment.  It is
+	  advised to use either HMAC-MD5 or HMAC-SHA1.
+
+endchoice
+
+endmenu
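The new choice appears in the SCTP configuration menu; exactly one of the three HMAC options can be selected, and SHA1/MD5 are only offered when the matching Cryptographic API pieces are enabled. A plausible resulting .config fragment (illustrative values, not taken from the commit):

# Example .config fragment (illustrative)
CONFIG_IP_SCTP=m
CONFIG_CRYPTO=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
# CONFIG_SCTP_HMAC_NONE is not set
CONFIG_SCTP_HMAC_SHA1=y
# CONFIG_SCTP_HMAC_MD5 is not set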
net/sctp/Makefile

@@ -6,11 +6,10 @@ obj-$(CONFIG_IP_SCTP) += sctp.o

 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
 	  protocol.o endpointola.o associola.o \
-	  transport.o sm_make_chunk.o ulpevent.o \
+	  transport.o chunk.o sm_make_chunk.o ulpevent.o \
 	  inqueue.o outqueue.o ulpqueue.o command.o \
 	  tsnmap.o bind_addr.o socket.o primitive.o \
-	  output.o input.o hashdriver.o sla1.o \
-	  debug.o ssnmap.o proc.o
+	  output.o input.o debug.o ssnmap.o proc.o

 ifeq ($(CONFIG_SCTP_ADLER32), y)
 sctp-y += adler32.o
net/sctp/adler32.c
@@ -2,43 +2,43 @@
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2003 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * This file has direct heritage from the SCTP user-level reference
 * implementation by R. Stewart, et al.  These functions implement the
 * Adler-32 algorithm as specified by RFC 2960.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Randall Stewart <rstewar1@email.mot.com>
 *    Ken Morneau <kmorneau@cisco.com>
 *    Qiaobing Xie <qxie1@email.mot.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
...
@@ -65,7 +65,7 @@
 * tad, but I have commented the original lines below
 */
 #include <linux/types.h>
 #include <net/sctp/sctp.h>

 #define BASE 65521	/* largest prime smaller than 65536 */
...
@@ -111,7 +111,7 @@ unsigned long update_adler32(unsigned long adler,
	 * This would then be (2 * BASE) - 2, which
	 * will still only do one subtract.  On Intel
	 * this is much better to do this way and
	 * avoid the divide.  Have not -pg'd on
	 * sparc.
	 */
	if (s2 >= BASE) {
...
@@ -135,7 +135,7 @@ __u32 sctp_start_cksum(__u8 *ptr, __u16 count)
	__u32 zero = 0L;

	/* Calculate the CRC up to the checksum field. */
	adler = update_adler32(adler, ptr,
			       sizeof(struct sctphdr) - sizeof(__u32));

	/* Skip over the checksum field. */
	adler = update_adler32(adler, (unsigned char *) &zero,
...
@@ -156,6 +156,15 @@ __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 adler)
	return adler;
 }

+__u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 count, __u32 adler)
+{
+	/* Its not worth it to try harder.  Adler32 is obsolescent. */
+	adler = update_adler32(adler, from, count);
+	memcpy(to, from, count);
+	return adler;
+}
+
 __u32 sctp_end_cksum(__u32 adler)
 {
	return adler;
...
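(For orientation: the value that update_adler32() maintains above is just the pair of running sums defined for Adler-32 in RFC 2960. A minimal, self-contained sketch of that definition, not the kernel's optimized routine, would be:)

	#include <stdint.h>
	#include <stddef.h>

	#define BASE 65521	/* largest prime smaller than 65536 */

	/* Fold 'len' bytes into an Adler-32 value; start with adler == 1. */
	static uint32_t adler32_update(uint32_t adler, const uint8_t *buf, size_t len)
	{
		uint32_t s1 = adler & 0xffff;
		uint32_t s2 = (adler >> 16) & 0xffff;
		size_t i;

		for (i = 0; i < len; i++) {
			s1 = (s1 + buf[i]) % BASE;
			s2 = (s2 + s1) % BASE;
		}
		return (s2 << 16) | s1;
	}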
net/sctp/associola.c
@@ -58,23 +58,23 @@
 #include <net/sctp/sctp.h>

 /* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(sctp_association_t *asoc);
+static void sctp_assoc_bh_rcv(struct sctp_association *asoc);

 /* 1st Level Abstractions. */

 /* Allocate and initialize a new association */
-sctp_association_t *sctp_association_new(const sctp_endpoint_t *ep,
-					 const struct sock *sk,
-					 sctp_scope_t scope, int priority)
+struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
+					      const struct sock *sk,
+					      sctp_scope_t scope, int gfp)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;

-	asoc = t_new(sctp_association_t, priority);
+	asoc = t_new(struct sctp_association, gfp);
	if (!asoc)
		goto fail;

-	if (!sctp_association_init(asoc, ep, sk, scope, priority))
+	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	asoc->base.malloced = 1;
...
@@ -89,23 +89,24 @@ sctp_association_t *sctp_association_new(const sctp_endpoint_t *ep,
 }

 /* Initialize a new association from provided memory. */
-sctp_association_t *sctp_association_init(sctp_association_t *asoc,
-					  const sctp_endpoint_t *ep,
-					  const struct sock *sk,
-					  sctp_scope_t scope, int priority)
+struct sctp_association *sctp_association_init(struct sctp_association *asoc,
+					       const struct sctp_endpoint *ep,
+					       const struct sock *sk,
+					       sctp_scope_t scope, int gfp)
 {
	struct sctp_opt *sp;
+	struct sctp_protocol *proto = sctp_get_protocol();
	int i;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Init all variables to a known value. */
-	memset(asoc, 0, sizeof(sctp_association_t));
+	memset(asoc, 0, sizeof(struct sctp_association));

	/* Discarding const is appropriate here. */
-	asoc->ep = (sctp_endpoint_t *)ep;
+	asoc->ep = (struct sctp_endpoint *)ep;
	sctp_endpoint_hold(asoc->ep);

	/* Hold the sock. */
...
@@ -136,10 +137,10 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
	asoc->frag_point = 0;

	/* Initialize the default association max_retrans and RTO values. */
-	asoc->max_retrans = ep->proto->max_retrans_association;
-	asoc->rto_initial = ep->proto->rto_initial;
-	asoc->rto_max = ep->proto->rto_max;
-	asoc->rto_min = ep->proto->rto_min;
+	asoc->max_retrans = proto->max_retrans_association;
+	asoc->rto_initial = proto->rto_initial;
+	asoc->rto_max = proto->rto_max;
+	asoc->rto_min = proto->rto_min;

	asoc->overall_error_threshold = 0;
	asoc->overall_error_count = 0;
...
@@ -147,7 +148,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
	/* Initialize the maximum mumber of new data packets that can be sent
	 * in a burst.
	 */
-	asoc->max_burst = ep->proto->max_burst;
+	asoc->max_burst = proto->max_burst;

	/* Copy things from the endpoint. */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
...
@@ -255,7 +256,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
			    sctp_packet_transmit_chunk,
			    sctp_packet_transmit);

-	if (NULL == sctp_ulpq_init(&asoc->ulpq, asoc))
+	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	/* Set up the tsn tracking. */
...
@@ -265,7 +266,6 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
	asoc->need_ecne = 0;

	asoc->debug_name = "unnamedasoc";
-	asoc->eyecatcher = SCTP_ASSOC_EYECATCHER;

	/* Assume that peer would support both address types unless we are
...
@@ -288,7 +288,7 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
 /* Free this association if possible.  There may still be users, so
  * the actual deallocation may be delayed.
  */
-void sctp_association_free(sctp_association_t *asoc)
+void sctp_association_free(struct sctp_association *asoc)
 {
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
...
@@ -298,8 +298,7 @@ void sctp_association_free(sctp_association_t *asoc)
	list_del(&asoc->asocs);

	/* Decrement the backlog value for a TCP-style listening socket. */
-	if ((SCTP_SOCKET_TCP == sctp_sk(sk)->type) &&
-	    (SCTP_SS_LISTENING == sk->state))
+	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		sk->ack_backlog--;

	/* Mark as dead, so other users can know this structure is
...
@@ -351,7 +350,7 @@ void sctp_association_free(sctp_association_t *asoc)
 }

 /* Cleanup and free up an association. */
-static void sctp_association_destroy(sctp_association_t *asoc)
+static void sctp_association_destroy(struct sctp_association *asoc)
 {
	SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);
...
@@ -379,31 +378,56 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
	 */
	if (transport->active)
		asoc->peer.active_path = transport;
+
+	/*
+	 * SFR-CACC algorithm:
+	 * Upon the receipt of a request to change the primary
+	 * destination address, on the data structure for the new
+	 * primary destination, the sender MUST do the following:
+	 *
+	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
+	 * to this destination address earlier. The sender MUST set
+	 * CYCLING_CHANGEOVER to indicate that this switch is a
+	 * double switch to the same destination address.
+	 */
+	if (transport->cacc.changeover_active)
+		transport->cacc.cycling_changeover = 1;
+
+	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
+	 * a changeover has occurred.
+	 */
+	transport->cacc.changeover_active = 1;
+
+	/* 3) The sender MUST store the next TSN to be sent in
+	 * next_tsn_at_change.
+	 */
+	transport->cacc.next_tsn_at_change = asoc->next_tsn;
 }

 /* Add a transport address to an association. */
 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
-					   int priority)
+					   int gfp)
 {
	struct sctp_transport *peer;
	struct sctp_opt *sp;
	unsigned short port;

+	sp = sctp_sk(asoc->base.sk);
+
	/* AF_INET and AF_INET6 share common port field. */
	port = addr->v4.sin_port;

	/* Set the port if it has not been set yet.  */
-	if (0 == asoc->peer.port) {
+	if (0 == asoc->peer.port)
		asoc->peer.port = port;
-	}

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer)
		return peer;

-	peer = sctp_transport_new(addr, priority);
+	peer = sctp_transport_new(addr, gfp);
	if (!peer)
		return NULL;
...
@@ -425,7 +449,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
			  "%d\n", asoc, asoc->pmtu);

-	asoc->frag_point = sctp_frag_point(asoc->pmtu);
+	asoc->frag_point = sctp_frag_point(sp, asoc->pmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
...
@@ -470,14 +494,14 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
	/* Initialize the peer's heartbeat interval based on the
	 * sock configured value.
	 */
-	sp = sctp_sk(asoc->base.sk);
	peer->hb_interval = sp->paddrparam.spp_hbinterval * HZ;

	/* Attach the remote transport to our asoc.  */
	list_add_tail(&peer->transports, &asoc->peer.transport_addr_list);

	/* If we do not yet have a primary path, set one.  */
-	if (NULL == asoc->peer.primary_path) {
+	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}
...
@@ -489,8 +513,9 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 }

 /* Lookup a transport by address. */
-struct sctp_transport *sctp_assoc_lookup_paddr(const sctp_association_t *asoc,
-					       const union sctp_addr *address)
+struct sctp_transport *sctp_assoc_lookup_paddr(
+					const struct sctp_association *asoc,
+					const union sctp_addr *address)
 {
	struct sctp_transport *t;
	struct list_head *pos;
...
@@ -510,7 +535,7 @@ struct sctp_transport *sctp_assoc_lookup_paddr(const sctp_association_t *asoc,
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
-void sctp_assoc_control_transport(sctp_association_t *asoc,
+void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
...
@@ -589,7 +614,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
	/* If we failed to find a usable transport, just camp on the
	 * primary, even if it is inactive.
	 */
-	if (NULL == first) {
+	if (!first) {
		first = asoc->peer.primary_path;
		second = asoc->peer.primary_path;
	}
...
@@ -600,7 +625,7 @@ void sctp_assoc_control_transport(sctp_association_t *asoc,
 }

 /* Hold a reference to an association. */
-void sctp_association_hold(sctp_association_t *asoc)
+void sctp_association_hold(struct sctp_association *asoc)
 {
	atomic_inc(&asoc->base.refcnt);
 }
...
@@ -608,7 +633,7 @@ void sctp_association_hold(sctp_association_t *asoc)
 /* Release a reference to an association and cleanup
  * if there are no more references.
  */
-void sctp_association_put(sctp_association_t *asoc)
+void sctp_association_put(struct sctp_association *asoc)
 {
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
...
@@ -617,7 +642,7 @@ void sctp_association_put(sctp_association_t *asoc)
 /* Allocate the next TSN, Transmission Sequence Number, for the given
  * association.
  */
-__u32 sctp_association_get_next_tsn(sctp_association_t *asoc)
+__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
 {
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
...
@@ -632,7 +657,7 @@ __u32 sctp_association_get_next_tsn(sctp_association_t *asoc)
 }

 /* Allocate 'num' TSNs by incrementing the association's TSN by num. */
-__u32 sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
+__u32 sctp_association_get_tsn_block(struct sctp_association *asoc, int num)
 {
	__u32 retval = asoc->next_tsn;
...
@@ -662,7 +687,7 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1,
 * Note: We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
-sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc)
+struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
 {
	struct sctp_chunk *chunk;
...
@@ -680,7 +705,7 @@ sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc)
 /* Use this function for the packet prepend callback when no ECNE
  * packet is desired (e.g. some packets don't like to be bundled).
  */
-sctp_chunk_t *sctp_get_no_prepend(sctp_association_t *asoc)
+struct sctp_chunk *sctp_get_no_prepend(struct sctp_association *asoc)
 {
	return NULL;
 }
...
@@ -688,13 +713,14 @@ sctp_chunk_t *sctp_get_no_prepend(sctp_association_t *asoc)
 /*
  * Find which transport this TSN was sent on.
  */
-struct sctp_transport *sctp_assoc_lookup_tsn(sctp_association_t *asoc, __u32 tsn)
+struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
+					     __u32 tsn)
 {
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct list_head *entry, *pos;
	struct sctp_transport *transport;
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;
	__u32 key = htonl(tsn);

	match = NULL;
...
@@ -717,7 +743,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(sctp_association_t *asoc, __u32 tsn
	active = asoc->peer.active_path;

	list_for_each(entry, &active->transmitted) {
-		chunk = list_entry(entry, sctp_chunk_t, transmitted_list);
+		chunk = list_entry(entry, struct sctp_chunk, transmitted_list);

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
...
@@ -732,7 +758,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(sctp_association_t *asoc, __u32 tsn
		if (transport == active)
			break;
		list_for_each(entry, &transport->transmitted) {
-			chunk = list_entry(entry, sctp_chunk_t,
+			chunk = list_entry(entry, struct sctp_chunk,
					   transmitted_list);
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
...
@@ -745,7 +771,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(sctp_association_t *asoc, __u32 tsn
 }

 /* Is this the association we are looking for? */
-struct sctp_transport *sctp_assoc_is_match(sctp_association_t *asoc,
+struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
 {
...
@@ -771,10 +797,10 @@ struct sctp_transport *sctp_assoc_is_match(sctp_association_t *asoc,
 }

 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
+static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
 {
-	sctp_endpoint_t *ep;
-	sctp_chunk_t *chunk;
+	struct sctp_endpoint *ep;
+	struct sctp_chunk *chunk;
	struct sock *sk;
	struct sctp_inq *inqueue;
	int state, subtype;
...
@@ -819,7 +845,7 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
 }

 /* This routine moves an association from its old sk to a new sk. */
-void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
+void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
 {
	struct sctp_opt *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;
...
@@ -830,7 +856,7 @@ void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
	list_del(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
-	if (SCTP_SOCKET_TCP == sctp_sk(oldsk)->type)
+	if (sctp_style(oldsk, TCP))
		oldsk->ack_backlog--;

	/* Release references to the old endpoint and the sock.  */
...
@@ -850,7 +876,8 @@ void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
 }

 /* Update an association (possibly from unexpected COOKIE-ECHO processing). */
-void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
+void sctp_assoc_update(struct sctp_association *asoc,
+		       struct sctp_association *new)
 {
	/* Copy in new parameters of peer. */
	asoc->c = new->c;
...
@@ -872,7 +899,7 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
-	if (SCTP_STATE_ESTABLISHED == asoc->state) {
+	if (sctp_state(asoc, ESTABLISHED)) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
...
@@ -898,7 +925,7 @@ void sctp_assoc_update(sctp_association_t *asoc, sctp_association_t *new)
 * through the inactive transports as this is the next best thing
 * we can try.
 */
-void sctp_assoc_update_retran_path(sctp_association_t *asoc)
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 {
	struct sctp_transport *t, *next;
	struct list_head *head = &asoc->peer.transport_addr_list;
...
@@ -944,7 +971,8 @@ void sctp_assoc_update_retran_path(sctp_association_t *asoc)
 }

 /* Choose the transport for sending a SHUTDOWN packet.  */
-struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *asoc)
+struct sctp_transport *sctp_assoc_choose_shutdown_transport(
+	struct sctp_association *asoc)
 {
	/* If this is the first time SHUTDOWN is sent, use the active path,
	 * else use the retran path. If the last SHUTDOWN was sent over the
...
@@ -963,7 +991,7 @@ struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *
 /* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
-void sctp_assoc_sync_pmtu(sctp_association_t *asoc)
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 {
	struct sctp_transport *t;
	struct list_head *pos;
...
@@ -980,8 +1008,9 @@ void sctp_assoc_sync_pmtu(sctp_association_t *asoc)
	}

	if (pmtu) {
+		struct sctp_opt *sp = sctp_sk(asoc->base.sk);
		asoc->pmtu = pmtu;
-		asoc->frag_point = sctp_frag_point(pmtu);
+		asoc->frag_point = sctp_frag_point(sp, pmtu);
	}

	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
...
@@ -1007,9 +1036,9 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
 }

 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
+void sctp_assoc_rwnd_increase(struct sctp_association *asoc, int len)
 {
-	sctp_chunk_t *sack;
+	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
...
@@ -1053,7 +1082,7 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
 }

 /* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(sctp_association_t *asoc, int len)
+void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, int len)
 {
	SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
	SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
...
@@ -1092,7 +1121,7 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, int gfp)
 }

 /* Build the association's bind address list from the cookie. */
-int sctp_assoc_set_bind_addr_from_cookie(sctp_association_t *asoc,
+int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 sctp_cookie_t *cookie, int gfp)
 {
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
...
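(Aside: the "Section 1.6 Serial Number Arithmetic" referenced in sctp_association_get_next_tsn() is the standard modulo-2^32 comparison. A hedged sketch of how such a comparison is usually written in C, independent of whatever helper macros the SCTP headers themselves provide:)

	#include <stdint.h>

	/* Serial-number comparison for 32-bit TSNs: a precedes b when the
	 * signed difference is negative, which handles wraparound at 2^32.
	 */
	static inline int tsn_before(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) < 0;
	}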
net/sctp/bind_addr.c
@@ -52,16 +52,17 @@
 #include <net/sctp/sm.h>

 /* Forward declarations for internal helpers. */
-static int sctp_copy_one_addr(sctp_bind_addr_t *, union sctp_addr *,
+static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *,
			      sctp_scope_t scope, int gfp, int flags);
-static void sctp_bind_addr_clean(sctp_bind_addr_t *);
+static void sctp_bind_addr_clean(struct sctp_bind_addr *);

 /* First Level Abstractions. */

 /* Copy 'src' to 'dest' taking 'scope' into account.  Omit addresses
 * in 'src' which have a broader scope than 'scope'.
 */
-int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
+int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+			const struct sctp_bind_addr *src,
			sctp_scope_t scope, int gfp, int flags)
 {
	struct sockaddr_storage_list *addr;
...
@@ -80,6 +81,22 @@ int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
			goto out;
	}

+	/* If there are no addresses matching the scope and
+	 * this is global scope, try to get a link scope address, with
+	 * the assumption that we must be sitting behind a NAT.
+	 */
+	if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
+		list_for_each(pos, &src->address_list) {
+			addr = list_entry(pos, struct sockaddr_storage_list,
+					  list);
+			error = sctp_copy_one_addr(dest, &addr->a,
+						   SCTP_SCOPE_LINK, gfp,
+						   flags);
+			if (error < 0)
+				goto out;
+		}
+	}
+
 out:
	if (error)
		sctp_bind_addr_clean(dest);
...
@@ -88,11 +105,11 @@ int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
 }

 /* Create a new SCTP_bind_addr from nothing. */
-sctp_bind_addr_t *sctp_bind_addr_new(int gfp)
+struct sctp_bind_addr *sctp_bind_addr_new(int gfp)
 {
-	sctp_bind_addr_t *retval;
+	struct sctp_bind_addr *retval;

-	retval = t_new(sctp_bind_addr_t, gfp);
+	retval = t_new(struct sctp_bind_addr, gfp);
	if (!retval)
		goto nomem;
...
@@ -107,7 +124,7 @@ sctp_bind_addr_t *sctp_bind_addr_new(int gfp)
 /* Initialize the SCTP_bind_addr structure for either an endpoint or
 * an association.
 */
-void sctp_bind_addr_init(sctp_bind_addr_t *bp, __u16 port)
+void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port)
 {
	bp->malloced = 0;
...
@@ -116,7 +133,7 @@ void sctp_bind_addr_init(sctp_bind_addr_t *bp, __u16 port)
 }

 /* Dispose of the address list. */
-static void sctp_bind_addr_clean(sctp_bind_addr_t *bp)
+static void sctp_bind_addr_clean(struct sctp_bind_addr *bp)
 {
	struct sockaddr_storage_list *addr;
	struct list_head *pos, *temp;
...
@@ -131,7 +148,7 @@ static void sctp_bind_addr_clean(sctp_bind_addr_t *bp)
 }

 /* Dispose of an SCTP_bind_addr structure  */
-void sctp_bind_addr_free(sctp_bind_addr_t *bp)
+void sctp_bind_addr_free(struct sctp_bind_addr *bp)
 {
	/* Empty the bind address list. */
	sctp_bind_addr_clean(bp);
...
@@ -143,7 +160,7 @@ void sctp_bind_addr_free(sctp_bind_addr_t *bp)
 }

 /* Add an address to the bind address list in the SCTP_bind_addr structure. */
-int sctp_add_bind_addr(sctp_bind_addr_t *bp, union sctp_addr *new,
+int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
		       int gfp)
 {
	struct sockaddr_storage_list *addr;
...
@@ -171,7 +188,7 @@ int sctp_add_bind_addr(sctp_bind_addr_t *bp, union sctp_addr *new,
 /* Delete an address from the bind address list in the SCTP_bind_addr
 * structure.
 */
-int sctp_del_bind_addr(sctp_bind_addr_t *bp, union sctp_addr *del_addr)
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
 {
	struct list_head *pos, *temp;
	struct sockaddr_storage_list *addr;
...
@@ -196,7 +213,7 @@ int sctp_del_bind_addr(sctp_bind_addr_t *bp, union sctp_addr *del_addr)
 *
 * The second argument is the return value for the length.
 */
-union sctp_params sctp_bind_addrs_to_raw(const sctp_bind_addr_t *bp,
+union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
					 int *addrs_len, int gfp)
 {
	union sctp_params addrparms;
...
@@ -214,6 +231,14 @@ union sctp_params sctp_bind_addrs_to_raw(const sctp_bind_addr_t *bp,
		len += sizeof(sctp_addr_param_t);
	}

+	/* Don't even bother embedding an address if there
+	 * is only one.
+	 */
+	if (len == sizeof(sctp_addr_param_t)) {
+		retval.v = NULL;
+		goto end_raw;
+	}
+
	retval.v = kmalloc(len, gfp);
	if (!retval.v)
		goto end_raw;
...
@@ -237,7 +262,7 @@ union sctp_params sctp_bind_addrs_to_raw(const sctp_bind_addr_t *bp,
 * Create an address list out of the raw address list format (IPv4 and IPv6
 * address parameters).
 */
-int sctp_raw_to_bind_addrs(sctp_bind_addr_t *bp, __u8 *raw_addr_list,
+int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
			   int addrs_len, __u16 port, int gfp)
 {
	sctp_addr_param_t *rawaddr;
...
@@ -283,7 +308,8 @@ int sctp_raw_to_bind_addrs(sctp_bind_addr_t *bp, __u8 *raw_addr_list,
 ********************************************************************/

 /* Does this contain a specified address?  Allow wildcarding. */
-int sctp_bind_addr_match(sctp_bind_addr_t *bp, const union sctp_addr *addr,
+int sctp_bind_addr_match(struct sctp_bind_addr *bp,
+			 const union sctp_addr *addr,
			 struct sctp_opt *opt)
 {
	struct sockaddr_storage_list *laddr;
...
@@ -299,7 +325,8 @@ int sctp_bind_addr_match(sctp_bind_addr_t *bp, const union sctp_addr *addr,
 }

 /* Copy out addresses from the global local address list. */
-static int sctp_copy_one_addr(sctp_bind_addr_t *dest, union sctp_addr *addr,
+static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
+			      union sctp_addr *addr,
			      sctp_scope_t scope, int gfp, int flags)
 {
	struct sctp_protocol *proto = sctp_get_protocol();
...
net/sctp/chunk.c (new file, 0 → 100644)

/* SCTP kernel reference Implementation
 * Copyright (c) 2003 International Business Machines Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * This file contains the code relating the the chunk abstraction.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* This file is mostly in anticipation of future work, but initially
 * populate with fragment tracking for an outbound message.
 */

/* Initialize datamsg from memory. */
void sctp_datamsg_init(struct sctp_datamsg *msg)
{
	atomic_set(&msg->refcnt, 1);
	msg->send_failed = 0;
	msg->send_error = 0;
	msg->can_expire = 0;
	INIT_LIST_HEAD(&msg->chunks);
}

/* Allocate and initialize datamsg. */
struct sctp_datamsg *sctp_datamsg_new(int gfp)
{
	struct sctp_datamsg *msg;
	msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
	if (msg)
		sctp_datamsg_init(msg);
	SCTP_DBG_OBJCNT_INC(datamsg);
	return msg;
}

/* Final destructruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
	struct list_head *pos, *temp;
	struct sctp_chunk *chunk;
	struct sctp_opt *sp;
	struct sctp_ulpevent *ev;
	struct sctp_association *asoc = NULL;
	int error = 0, notify;

	/* If we failed, we may need to notify. */
	notify = msg->send_failed ? -1 : 0;

	/* Release all references. */
	list_for_each_safe(pos, temp, &msg->chunks) {
		list_del(pos);
		chunk = list_entry(pos, struct sctp_chunk, frag_list);
		/* Check whether we _really_ need to notify. */
		if (notify < 0) {
			asoc = chunk->asoc;
			if (msg->send_error)
				error = msg->send_error;
			else
				error = asoc->outqueue.error;

			sp = sctp_sk(asoc->base.sk);
			notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED,
							    &sp->subscribe);
		}

		/* Generate a SEND FAILED event only if enabled. */
		if (notify > 0) {
			int sent;
			if (chunk->has_tsn)
				sent = SCTP_DATA_SENT;
			else
				sent = SCTP_DATA_UNSENT;

			ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
							    error, GFP_ATOMIC);
			if (ev)
				sctp_ulpq_tail_event(&asoc->ulpq, ev);
		}

		sctp_chunk_put(chunk);
	}

	SCTP_DBG_OBJCNT_DEC(datamsg);
	kfree(msg);
}

/* Hold a reference. */
void sctp_datamsg_hold(struct sctp_datamsg *msg)
{
	atomic_inc(&msg->refcnt);
}

/* Release a reference. */
void sctp_datamsg_put(struct sctp_datamsg *msg)
{
	if (atomic_dec_and_test(&msg->refcnt))
		sctp_datamsg_destroy(msg);
}

/* Free a message.  Really just give up a reference, the
 * really free happens in sctp_datamsg_destroy().
 */
void sctp_datamsg_free(struct sctp_datamsg *msg)
{
	sctp_datamsg_put(msg);
}

/* Hold on to all the fragments until all chunks have been sent. */
void sctp_datamsg_track(struct sctp_chunk *chunk)
{
	sctp_chunk_hold(chunk);
}

/* Assign a chunk to this datamsg. */
void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk)
{
	sctp_datamsg_hold(msg);
	chunk->msg = msg;
}

/* A data chunk can have a maximum payload of (2^16 - 20).  Break
 * down any such message into smaller chunks.  Opportunistically, fragment
 * the chunks down to the current MTU constraints.  We may get refragmented
 * later if the PMTU changes, but it is _much better_ to fragment immediately
 * with a reasonable guess than always doing our fragmentation on the
 * soft-interrupt.
 */
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
					    struct sctp_sndrcvinfo *sinfo,
					    struct msghdr *msgh, int msg_len)
{
	int max, whole, i, offset, over, err;
	int len, first_len;
	struct sctp_chunk *chunk;
	struct sctp_datamsg *msg;
	struct list_head *pos, *temp;
	__u8 frag;

	msg = sctp_datamsg_new(GFP_KERNEL);
	if (!msg)
		return NULL;

	/* Note: Calculate this outside of the loop, so that all fragments
	 * have the same expiration.
	 */
	if (sinfo->sinfo_timetolive) {
		struct timeval tv;
		__u32 ttl = sinfo->sinfo_timetolive;

		/* sinfo_timetolive is in milliseconds */
		tv.tv_sec = ttl / 1000;
		tv.tv_usec = ttl % 1000 * 1000;
		msg->expires_at = jiffies + timeval_to_jiffies(&tv);
		msg->can_expire = 1;
	}

	/* What is a reasonable fragmentation point right now? */
	max = asoc->pmtu;
	if (max < SCTP_MIN_PMTU)
		max = SCTP_MIN_PMTU;
	max -= SCTP_IP_OVERHEAD;

	/* Make sure not beyond maximum chunk size. */
	if (max > SCTP_MAX_CHUNK_LEN)
		max = SCTP_MAX_CHUNK_LEN;

	/* Subtract out the overhead of a data chunk header. */
	max -= sizeof(struct sctp_data_chunk);
	whole = 0;

	/* If user has specified smaller fragmentation, make it so. */
	if (sctp_sk(asoc->base.sk)->user_frag)
		max = min_t(int, max, sctp_sk(asoc->base.sk)->user_frag);

	first_len = max;

	/* Encourage Cookie-ECHO bundling. */
	if (asoc->state < SCTP_STATE_COOKIE_ECHOED) {
		whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);

		/* Account for the DATA to be bundled with the COOKIE-ECHO. */
		if (whole) {
			first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN;
			msg_len -= first_len;
			whole = 1;
		}
	}

	/* How many full sized?  How many bytes leftover? */
	whole += msg_len / max;
	over = msg_len % max;
	offset = 0;

	if ((whole > 1) || (whole && over))
		SCTP_INC_STATS_USER(SctpFragUsrMsgs);

	/* Create chunks for all the full sized DATA chunks. */
	for (i = 0, len = first_len; i < whole; i++) {
		frag = SCTP_DATA_MIDDLE_FRAG;

		if (0 == i)
			frag |= SCTP_DATA_FIRST_FRAG;

		if ((i == (whole - 1)) && !over)
			frag |= SCTP_DATA_LAST_FRAG;

		chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);

		if (!chunk)
			goto errout;
		err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
		if (err < 0)
			goto errout;

		offset += len;

		/* Put the chunk->skb back into the form expected by send.  */
		__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
			   - (__u8 *)chunk->skb->data);

		sctp_datamsg_assign(msg, chunk);
		list_add_tail(&chunk->frag_list, &msg->chunks);

		/* The first chunk, the first chunk was likely short
		 * to allow bundling, so reset to full size.
		 */
		if (0 == i)
			len = max;
	}

	/* .. now the leftover bytes. */
	if (over) {
		if (!whole)
			frag = SCTP_DATA_NOT_FRAG;
		else
			frag = SCTP_DATA_LAST_FRAG;

		chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);

		if (!chunk)
			goto errout;

		err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov);

		/* Put the chunk->skb back into the form expected by send.  */
		__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
			   - (__u8 *)chunk->skb->data);
		if (err < 0)
			goto errout;

		sctp_datamsg_assign(msg, chunk);
		list_add_tail(&chunk->frag_list, &msg->chunks);
	}

	return msg;

errout:
	list_for_each_safe(pos, temp, &msg->chunks) {
		list_del(pos);
		chunk = list_entry(pos, struct sctp_chunk, frag_list);
		sctp_chunk_free(chunk);
	}
	sctp_datamsg_free(msg);
	return NULL;
}

/* Check whether this message has expired. */
int sctp_datamsg_expires(struct sctp_chunk *chunk)
{
	struct sctp_datamsg *msg = chunk->msg;

	/* FIXME: When PR-SCTP is supported we can make this
	 * check more lenient.
	 */
	if (!msg->can_expire)
		return 0;

	if (time_after(jiffies, msg->expires_at))
		return 1;

	return 0;
}

/* This chunk (and consequently entire message) has failed in its sending. */
void sctp_datamsg_fail(struct sctp_chunk *chunk, int error)
{
	chunk->msg->send_failed = 1;
	chunk->msg->send_error = error;
}
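(To make the sizing arithmetic in sctp_datamsg_from_user() concrete, here is a small stand-alone sketch; the overhead and header sizes below are illustrative stand-ins rather than the kernel's actual SCTP_* defines:)

	#include <stdio.h>

	int main(void)
	{
		int pmtu = 1500;		/* association's current path MTU */
		int ip_overhead = 20 + 12;	/* illustrative: IPv4 + SCTP common header */
		int data_hdr = 16;		/* illustrative: DATA chunk header */
		int msg_len = 4000;		/* user message size */

		int max = pmtu - ip_overhead - data_hdr; /* payload per DATA chunk */
		int whole = msg_len / max;		 /* full sized fragments */
		int over = msg_len % max;		 /* leftover bytes, if any */

		printf("%d full fragments of %d bytes, plus %d trailing bytes\n",
		       whole, max, over);
		return 0;
	}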
net/sctp/command.c
@@ -43,9 +43,9 @@
 #include <net/sctp/sm.h>

 /* Create a new sctp_command_sequence. */
-sctp_cmd_seq_t *sctp_new_cmd_seq(int priority)
+sctp_cmd_seq_t *sctp_new_cmd_seq(int gfp)
 {
-	sctp_cmd_seq_t *retval = t_new(sctp_cmd_seq_t, priority);
+	sctp_cmd_seq_t *retval = t_new(sctp_cmd_seq_t, gfp);

	if (retval)
		sctp_init_cmd_seq(retval);
...
net/sctp/crc32c.c
@@ -170,6 +170,23 @@ __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32)
	return crc32;
 }

+__u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32)
+{
+	__u32 i;
+	__u32 *_to = (__u32 *)to;
+	__u32 *_from = (__u32 *)from;
+
+	for (i = 0; i < (length / 4); i++) {
+		_to[i] = _from[i];
+		CRC32C(crc32, from[i * 4]);
+		CRC32C(crc32, from[i * 4 + 1]);
+		CRC32C(crc32, from[i * 4 + 2]);
+		CRC32C(crc32, from[i * 4 + 3]);
+	}
+
+	return crc32;
+}
+
 __u32 sctp_end_cksum(__u32 crc32)
 {
	__u32 result;
...
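(The CRC32C() helper used by sctp_update_copy_cksum() is assumed here to be the usual table-driven, byte-at-a-time update over the Castagnoli polynomial; the sketch below only shows the shape of that per-byte step, with the 256-entry table left out:)

	#include <stdint.h>

	extern const uint32_t crc_c[256];	/* precomputed CRC32C lookup table */

	/* Fold one byte into the running CRC, least significant byte first. */
	static inline uint32_t crc32c_step(uint32_t crc, uint8_t byte)
	{
		return (crc >> 8) ^ crc_c[(crc ^ byte) & 0xff];
	}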
net/sctp/endpointola.c
@@ -54,27 +54,27 @@
 #include <linux/slab.h>
 #include <linux/in.h>
 #include <linux/random.h>	/* get_random_bytes() */
 #include <linux/crypto.h>
 #include <net/sock.h>
 #include <net/ipv6.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>

 /* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep);
+static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);

-/* Create a sctp_endpoint_t with all that boring stuff initialized.
+/* Create a sctp_endpoint with all that boring stuff initialized.
 * Returns NULL if there isn't enough memory.
 */
-sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *proto,
-				   struct sock *sk, int priority)
+struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, int gfp)
 {
-	sctp_endpoint_t *ep;
+	struct sctp_endpoint *ep;

	/* Build a local endpoint. */
-	ep = t_new(sctp_endpoint_t, priority);
+	ep = t_new(struct sctp_endpoint, gfp);
	if (!ep)
		goto fail;
-	if (!sctp_endpoint_init(ep, proto, sk, priority))
+	if (!sctp_endpoint_init(ep, sk, gfp))
		goto fail_init;
	ep->base.malloced = 1;
	SCTP_DBG_OBJCNT_INC(ep);
...
@@ -89,12 +89,11 @@ sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *proto,
 /*
 * Initialize the base fields of the endpoint structure.
 */
-sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
-				    struct sctp_protocol *proto,
-				    struct sock *sk, int priority)
+struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
+					 struct sock *sk, int gfp)
 {
+	struct sctp_opt *sp = sctp_sk(sk);
-	memset(ep, 0, sizeof(sctp_endpoint_t));
+	memset(ep, 0, sizeof(struct sctp_endpoint));

	/* Initialize the base structure. */
	/* What type of endpoint are we? */
...
@@ -110,8 +109,7 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
	/* Set its top-half handler */
	sctp_inq_set_th_handler(&ep->base.inqueue,
-				(void (*)(void *))sctp_endpoint_bh_rcv,
-				ep);
+				(void (*)(void *))sctp_endpoint_bh_rcv, ep);

	/* Initialize the bind addr area */
	sctp_bind_addr_init(&ep->base.bind_addr, 0);
...
@@ -121,21 +119,16 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
	ep->base.sk = sk;
	sock_hold(ep->base.sk);

-	/* This pointer is useful to access the default protocol parameter
-	 * values.
-	 */
-	ep->proto = proto;
-
	/* Create the lists of associations.  */
	INIT_LIST_HEAD(&ep->asocs);

	/* Set up the base timeout information.  */
	ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
	ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
		SCTP_DEFAULT_TIMEOUT_T1_COOKIE;
	ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
		SCTP_DEFAULT_TIMEOUT_T1_INIT;
	ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =
		sp->rtoinfo.srto_initial;
	ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
...
@@ -146,11 +139,11 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
	ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
		5 * sp->rtoinfo.srto_max;
	ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] =
		SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
	ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = SCTP_DEFAULT_TIMEOUT_SACK;
	ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Set up the default send/receive buffer space.  */
...
@@ -175,7 +168,8 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
 }

 /* Add an association to an endpoint.  */
-void sctp_endpoint_add_asoc(sctp_endpoint_t *ep, sctp_association_t *asoc)
+void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
+			    struct sctp_association *asoc)
 {
	struct sock *sk = ep->base.sk;
...
@@ -183,22 +177,21 @@ void sctp_endpoint_add_asoc(sctp_endpoint_t *ep, sctp_association_t *asoc)
	list_add_tail(&asoc->asocs, &ep->asocs);

	/* Increment the backlog value for a TCP-style listening socket. */
-	if ((SCTP_SOCKET_TCP == sctp_sk(sk)->type) &&
-	    (SCTP_SS_LISTENING == sk->state))
+	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		sk->ack_backlog++;
 }

 /* Free the endpoint structure.  Delay cleanup until
 * all users have released their reference count on this structure.
 */
-void sctp_endpoint_free(sctp_endpoint_t *ep)
+void sctp_endpoint_free(struct sctp_endpoint *ep)
 {
	ep->base.dead = 1;
	sctp_endpoint_put(ep);
 }

 /* Final destructor for endpoint.  */
-void sctp_endpoint_destroy(sctp_endpoint_t *ep)
+void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
...
@@ -207,9 +200,12 @@ void sctp_endpoint_destroy(sctp_endpoint_t *ep)
	/* Unlink this endpoint, so we can't find it again! */
	sctp_unhash_endpoint(ep);

-	/* Cleanup the inqueue. */
-	sctp_inq_free(&ep->base.inqueue);
+	/* Free up the HMAC transform. */
+	if (sctp_sk(ep->base.sk)->hmac)
+		sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);

+	/* Cleanup. */
+	sctp_inq_free(&ep->base.inqueue);
	sctp_bind_addr_free(&ep->base.bind_addr);

	/* Remove and free the port */
...
@@ -228,7 +224,7 @@ void sctp_endpoint_destroy(sctp_endpoint_t *ep)
 }

 /* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(sctp_endpoint_t *ep)
+void sctp_endpoint_hold(struct sctp_endpoint *ep)
 {
	atomic_inc(&ep->base.refcnt);
 }
...
@@ -236,17 +232,17 @@ void sctp_endpoint_hold(sctp_endpoint_t *ep)
 /* Release a reference to an endpoint and clean up if there are
 * no more references.
 */
-void sctp_endpoint_put(sctp_endpoint_t *ep)
+void sctp_endpoint_put(struct sctp_endpoint *ep)
 {
	if (atomic_dec_and_test(&ep->base.refcnt))
		sctp_endpoint_destroy(ep);
 }

 /* Is this the endpoint we are looking for? */
-sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *ep,
-					const union sctp_addr *laddr)
+struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
+					     const union sctp_addr *laddr)
 {
-	sctp_endpoint_t *retval;
+	struct sctp_endpoint *retval;

	sctp_read_lock(&ep->base.addr_lock);
	if (ep->base.bind_addr.port == laddr->v4.sin_port) {
...
@@ -268,19 +264,19 @@ sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *ep,
 * We do a linear search of the associations for this endpoint.
 * We return the matching transport address too.
 */
-sctp_association_t *__sctp_endpoint_lookup_assoc(
-	const sctp_endpoint_t *endpoint,
+struct sctp_association *__sctp_endpoint_lookup_assoc(
+	const struct sctp_endpoint *ep,
	const union sctp_addr *paddr,
	struct sctp_transport **transport)
 {
	int rport;
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
	struct list_head *pos;

	rport = paddr->v4.sin_port;

-	list_for_each(pos, &endpoint->asocs) {
-		asoc = list_entry(pos, sctp_association_t, asocs);
+	list_for_each(pos, &ep->asocs) {
+		asoc = list_entry(pos, struct sctp_association, asocs);
		if (rport == asoc->peer.port) {
			sctp_read_lock(&asoc->base.addr_lock);
			*transport = sctp_assoc_lookup_paddr(asoc, paddr);
...
@@ -296,12 +292,12 @@ sctp_association_t *__sctp_endpoint_lookup_assoc(
 }

 /* Lookup association on an endpoint based on a peer address.  BH-safe.  */
-sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
+struct sctp_association *sctp_endpoint_lookup_assoc(
+	const struct sctp_endpoint *ep,
	const union sctp_addr *paddr,
	struct sctp_transport **transport)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;

	sctp_local_bh_disable();
	asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport);
...
@@ -313,12 +309,12 @@ sctp_association_t *sctp_endpoint_lookup_assoc(
 /* Look for any peeled off association from the endpoint that matches the
 * given peer address.
 */
-int sctp_endpoint_is_peeled_off(sctp_endpoint_t *ep,
+int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
				const union sctp_addr *paddr)
 {
	struct list_head *pos;
	struct sockaddr_storage_list *addr;
-	sctp_bind_addr_t *bp;
+	struct sctp_bind_addr *bp;

	sctp_read_lock(&ep->base.addr_lock);
	bp = &ep->base.bind_addr;
...
@@ -337,12 +333,12 @@ int sctp_endpoint_is_peeled_off(sctp_endpoint_t *ep,
 /* Do delayed input processing.  This is scheduled by sctp_rcv().
 * This may be called on BH or task time.
 */
-static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
+static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
	struct sock *sk;
	struct sctp_transport *transport;
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	sctp_subtype_t subtype;
	sctp_state_t state;
...
@@ -355,7 +351,7 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
	inqueue = &ep->base.inqueue;
	sk = ep->base.sk;

	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		subtype.chunk = chunk->chunk_hdr->type;

		/* We might have grown an association since last we
...
net/sctp/input.c
@@ -63,11 +63,11 @@
 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);
-sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
+struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      const union sctp_addr *paddr,
				      struct sctp_transport **transportp);
-sctp_endpoint_t *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
+struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);

 /* Calculate the SCTP checksum of an SCTP packet.  */
...
@@ -102,11 +102,11 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 int sctp_rcv(struct sk_buff *skb)
 {
	struct sock *sk;
-	sctp_association_t *asoc;
-	sctp_endpoint_t *ep = NULL;
-	sctp_endpoint_common_t *rcvr;
+	struct sctp_association *asoc;
+	struct sctp_endpoint *ep = NULL;
+	struct sctp_ep_common *rcvr;
	struct sctp_transport *transport = NULL;
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;
	struct sctphdr *sh;
	union sctp_addr src;
	union sctp_addr dest;
...
@@ -128,11 +128,11 @@ int sctp_rcv(struct sk_buff *skb)
	if (sctp_rcv_checksum(skb) < 0)
		goto bad_packet;

	skb_pull(skb, sizeof(struct sctphdr));

	family = ipver2af(skb->nh.iph->version);
	af = sctp_get_af_specific(family);
	if (unlikely(!af))
		goto bad_packet;

	/* Initialize local addresses for lookups. */
...
@@ -224,9 +224,7 @@ int sctp_rcv(struct sk_buff *skb)
	return ret;

 bad_packet:
-#if 0 /* FIXME */
-	SCTP_INC_STATS(SctpInErrs);
-#endif /* FIXME*/
+	SCTP_INC_STATS(SctpChecksumErrors);

 discard_it:
	kfree_skb(skb);
...
@@ -252,13 +250,13 @@ int sctp_rcv(struct sk_buff *skb)
 */
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;

	/* One day chunk will live inside the skb, but for
	 * now this works.
	 */
-	chunk = (sctp_chunk_t *)skb;
+	chunk = (struct sctp_chunk *)skb;
	inqueue = &chunk->rcvr->inqueue;

	sctp_inq_push(inqueue, chunk);
...
@@ -286,8 +284,8 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 /* Common lookup code for icmp/icmpv6 error handler. */
 struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
			     struct sctphdr *sctphdr,
			     struct sctp_endpoint **epp,
			     struct sctp_association **app,
			     struct sctp_transport **tpp)
 {
	union sctp_addr saddr;
...
@@ -309,15 +307,15 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
	af->from_skb(&saddr, skb, 1);
	af->from_skb(&daddr, skb, 0);

	/* Look for an association that matches the incoming ICMP error
	 * packet.
	 */
	asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
	if (!asoc) {
		/* If there is no matching association, see if it matches any
		 * endpoint. This may happen for an ICMP error generated in
		 * response to an INIT_ACK.
		 */
		ep = __sctp_rcv_lookup_endpoint(&daddr);
		if (!ep) {
			return NULL;
...
@@ -345,25 +343,25 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
	*app = asoc;
	*tpp = transport;
	return sk;

 out:
	sock_put(sk);
	if (asoc)
		sctp_association_put(asoc);
	if (ep)
		sctp_endpoint_put(ep);
	return NULL;
 }

 /* Common cleanup code for icmp/icmpv6 error handler. */
 void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep,
		     struct sctp_association *asoc)
 {
	sctp_bh_unlock_sock(sk);
	sock_put(sk);
	if (asoc)
		sctp_association_put(asoc);
	if (ep)
		sctp_endpoint_put(ep);
 }
...
@@ -389,8 +387,8 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
-	sctp_endpoint_t *ep;
-	sctp_association_t *asoc;
+	struct sctp_endpoint *ep;
+	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct inet_opt *inet;
	char *saveip, *savesctp;
...
@@ -414,7 +412,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
		ICMP_INC_STATS_BH(IcmpInErrors);
		return;
	}
	/* Warning:  The sock lock is held.  Remember to call
	 * sctp_err_finish!
	 */
...
@@ -520,10 +518,10 @@ int sctp_rcv_ootb(struct sk_buff *skb)
 }

 /* Insert endpoint into the hash table.  */
-void __sctp_hash_endpoint(sctp_endpoint_t *ep)
+void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
-	sctp_endpoint_common_t **epp;
-	sctp_endpoint_common_t *epb;
+	struct sctp_ep_common **epp;
+	struct sctp_ep_common *epb;
	sctp_hashbucket_t *head;

	epb = &ep->base;
...
@@ -542,7 +540,7 @@ void __sctp_hash_endpoint(sctp_endpoint_t *ep)
 }

 /* Add an endpoint to the hash. Local BH-safe. */
-void sctp_hash_endpoint(sctp_endpoint_t *ep)
+void sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
	sctp_local_bh_disable();
	__sctp_hash_endpoint(ep);
...
@@ -550,10 +548,10 @@ void sctp_hash_endpoint(sctp_endpoint_t *ep)
 }

 /* Remove endpoint from the hash table.  */
-void __sctp_unhash_endpoint(sctp_endpoint_t *ep)
+void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
	sctp_hashbucket_t *head;
-	sctp_endpoint_common_t *epb;
+	struct sctp_ep_common *epb;

	epb = &ep->base;
...
@@ -574,7 +572,7 @@ void __sctp_unhash_endpoint(sctp_endpoint_t *ep)
 }

 /* Remove endpoint from the hash.  Local BH-safe. */
-void sctp_unhash_endpoint(sctp_endpoint_t *ep)
+void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
	sctp_local_bh_disable();
	__sctp_unhash_endpoint(ep);
...
@@ -582,11 +580,11 @@ void sctp_unhash_endpoint(sctp_endpoint_t *ep)
 }

 /* Look up an endpoint. */
-sctp_endpoint_t *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
+struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
 {
	sctp_hashbucket_t *head;
-	sctp_endpoint_common_t *epb;
-	sctp_endpoint_t *ep;
+	struct sctp_ep_common *epb;
+	struct sctp_endpoint *ep;
	int hash;

	hash = sctp_ep_hashfn(laddr->v4.sin_port);
...
@@ -609,7 +607,7 @@ sctp_endpoint_t *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
 }

 /* Add an association to the hash. Local BH-safe. */
-void sctp_hash_established(sctp_association_t *asoc)
+void sctp_hash_established(struct sctp_association *asoc)
 {
	sctp_local_bh_disable();
	__sctp_hash_established(asoc);
...
@@ -617,10 +615,10 @@ void sctp_hash_established(sctp_association_t *asoc)
 }

 /* Insert association into the hash table.  */
-void __sctp_hash_established(sctp_association_t *asoc)
+void __sctp_hash_established(struct sctp_association *asoc)
 {
-	sctp_endpoint_common_t **epp;
-	sctp_endpoint_common_t *epb;
+	struct sctp_ep_common **epp;
+	struct sctp_ep_common *epb;
	sctp_hashbucket_t *head;

	epb = &asoc->base;
...
@@ -641,7 +639,7 @@ void __sctp_hash_established(sctp_association_t *asoc)
 }

 /* Remove association from the hash table.  Local BH-safe. */
-void sctp_unhash_established(sctp_association_t *asoc)
+void sctp_unhash_established(struct sctp_association *asoc)
 {
	sctp_local_bh_disable();
	__sctp_unhash_established(asoc);
...
@@ -649,10 +647,10 @@ void sctp_unhash_established(sctp_association_t *asoc)
 }

 /* Remove association from the hash table.  */
-void __sctp_unhash_established(sctp_association_t *asoc)
+void __sctp_unhash_established(struct sctp_association *asoc)
 {
	sctp_hashbucket_t *head;
-	sctp_endpoint_common_t *epb;
+	struct sctp_ep_common *epb;

	epb = &asoc->base;
...
@@ -674,13 +672,14 @@ void __sctp_unhash_established(sctp_association_t *asoc)
 }

 /* Look up an association. */
-sctp_association_t *__sctp_lookup_association(const union sctp_addr *local,
-					      const union sctp_addr *peer,
-					      struct sctp_transport **pt)
+struct sctp_association *__sctp_lookup_association(
+					const union sctp_addr *local,
+					const union sctp_addr *peer,
+					struct sctp_transport **pt)
 {
	sctp_hashbucket_t *head;
-	sctp_endpoint_common_t *epb;
-	sctp_association_t *asoc;
+	struct sctp_ep_common *epb;
+	struct sctp_association *asoc;
	struct sctp_transport *transport;
	int hash;
...
@@ -710,11 +709,11 @@ sctp_association_t *__sctp_lookup_association(const union sctp_addr *local,
 }

 /* Look up an association. BH-safe. */
-sctp_association_t *sctp_lookup_association(const union sctp_addr *laddr,
+struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
					    const union sctp_addr *paddr,
					    struct sctp_transport **transportp)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;

	sctp_local_bh_disable();
	asoc = __sctp_lookup_association(laddr, paddr, transportp);
...
@@ -727,7 +726,7 @@ sctp_association_t *sctp_lookup_association(const union sctp_addr *laddr,
 int sctp_has_association(const union sctp_addr *laddr,
			 const union sctp_addr *paddr)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
	struct sctp_transport *transport;

	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
...
@@ -757,10 +756,10 @@ int sctp_has_association(const union sctp_addr *laddr,
 * in certain circumstances.
 *
 */
-static sctp_association_t *__sctp_rcv_init_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
	const union sctp_addr *laddr, struct sctp_transport **transportp)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
	union sctp_addr addr;
	union sctp_addr *paddr = &addr;
	struct sctphdr *sh = (struct sctphdr *) skb->h.raw;
...
@@ -815,12 +814,12 @@ static sctp_association_t *__sctp_rcv_init_lookup(struct sk_buff *skb,
 }

 /* Lookup an association for an inbound skb. */
-sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
+struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;

	asoc = __sctp_lookup_association(laddr, paddr, transportp);
...
net/sctp/inqueue.c
@@ -75,17 +75,17 @@ struct sctp_inq *sctp_inq_new(void)
 /* Release the memory associated with an SCTP inqueue.  */
 void sctp_inq_free(struct sctp_inq *queue)
 {
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;

	/* Empty the queue.  */
-	while ((chunk = (sctp_chunk_t *) skb_dequeue(&queue->in)) != NULL)
-		sctp_free_chunk(chunk);
+	while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)))
+		sctp_chunk_free(chunk);

	/* If there is a packet which is currently being worked on,
	 * free it as well.
	 */
	if (queue->in_progress)
-		sctp_free_chunk(queue->in_progress);
+		sctp_chunk_free(queue->in_progress);

	if (queue->malloced) {
		/* Dump the master memory segment. */
...
@@ -96,7 +96,7 @@ void sctp_inq_free(struct sctp_inq *queue)
 /* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 */
-void sctp_inq_push(struct sctp_inq *q, sctp_chunk_t *packet)
+void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *packet)
 {
	/* Directly call the packet handling routine. */
...
@@ -114,23 +114,23 @@ void sctp_inq_push(struct sctp_inq *q, sctp_chunk_t *packet)
 * WARNING:  If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
-sctp_chunk_t *sctp_inq_pop(struct sctp_inq *queue)
+struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 {
-	sctp_chunk_t *chunk;
+	struct sctp_chunk *chunk;
	sctp_chunkhdr_t *ch = NULL;

	/* The assumption is that we are safe to process the chunks
	 * at this time.
	 */
-	if ((chunk = queue->in_progress) != NULL) {
+	if ((chunk = queue->in_progress)) {
		/* There is a packet that we have been working on.
		 * Any post processing work to do before we move on?
		 */
		if (chunk->singleton ||
		    chunk->end_of_packet ||
		    chunk->pdiscard) {
-			sctp_free_chunk(chunk);
+			sctp_chunk_free(chunk);
			chunk = queue->in_progress = NULL;
		} else {
			/* Nothing to do. Next chunk in the packet, please. */
...
@@ -149,7 +149,7 @@ sctp_chunk_t *sctp_inq_pop(struct sctp_inq *queue)
		return NULL;

	chunk = queue->in_progress =
-		(sctp_chunk_t *) skb_dequeue(&queue->in);
+		(struct sctp_chunk *) skb_dequeue(&queue->in);

	/* This is the first chunk in the packet. */
	chunk->singleton = 1;
...
net/sctp/ipv6.c
...
@@ -96,8 +96,8 @@ void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
    struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
    struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
    struct sock *sk;
-    sctp_endpoint_t *ep;
-    sctp_association_t *asoc;
+    struct sctp_endpoint *ep;
+    struct sctp_association *asoc;
    struct sctp_transport *transport;
    struct ipv6_pinfo *np;
    char *saveip, *savesctp;
...
@@ -119,7 +119,7 @@ void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        goto out;
    }

    /* Warning: The sock lock is held. Remember to call
     * sctp_err_finish!
     */
...
@@ -148,21 +148,19 @@ void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}

/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
                        int ipfragok)
{
    struct sock *sk = skb->sk;
    struct ipv6_pinfo *np = inet6_sk(sk);
    struct flowi fl;
-    struct dst_entry *dst = skb->dst;
-    struct rt6_info *rt6 = (struct rt6_info *)dst;

    fl.proto = sk->protocol;

    /* Fill in the dest address from the route entry passed with the skb
     * and the source address from the transport.
     */
-    fl.fl6_dst = &rt6->rt6i_dst.addr;
+    fl.fl6_dst = &transport->ipaddr.v6.sin6_addr;
    fl.fl6_src = &transport->saddr.v6.sin6_addr;

    fl.fl6_flowlabel = np->flow_label;
...
@@ -193,7 +191,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
/* Returns the dst cache entry for the given source and destination ip
 * addresses.
 */
-struct dst_entry *sctp_v6_get_dst(sctp_association_t *asoc,
+struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
                                  union sctp_addr *daddr,
                                  union sctp_addr *saddr)
{
...
@@ -251,10 +249,10 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
/* Fills in the source address(saddr) based on the destination address(daddr)
 * and asoc's bind address list.
 */
-void sctp_v6_get_saddr(sctp_association_t *asoc, struct dst_entry *dst,
+void sctp_v6_get_saddr(struct sctp_association *asoc, struct dst_entry *dst,
                       union sctp_addr *daddr, union sctp_addr *saddr)
{
-    sctp_bind_addr_t *bp;
+    struct sctp_bind_addr *bp;
    rwlock_t *addr_lock;
    struct sockaddr_storage_list *laddr;
    struct list_head *pos;
...
@@ -376,11 +374,17 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
}

/* Initialize sk->rcv_saddr from sctp_addr. */
-static void sctp_v6_to_sk(union sctp_addr *addr, struct sock *sk)
+static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
    inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
}

+/* Initialize sk->daddr from sctp_addr. */
+static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
+{
+    inet6_sk(sk)->daddr = addr->v6.sin6_addr;
+}
+
/* Initialize a sctp_addr from a dst_entry. */
static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst,
                              unsigned short port)
...
@@ -391,7 +395,7 @@ static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst,
    ipv6_addr_copy(&addr->v6.sin6_addr, &rt->rt6i_src.addr);
}

/* Compare addresses exactly.
 * FIXME: v4-mapped-v6.
 */
static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
...
@@ -521,6 +525,7 @@ struct sock *sctp_v6_create_accept_sk(struct sock *sk,
    newsk->family = PF_INET6;
    newsk->protocol = IPPROTO_SCTP;
    newsk->backlog_rcv = sk->prot->backlog_rcv;
+    newsk->shutdown = sk->shutdown;

    newsctp6sk = (struct sctp6_sock *)newsk;
    newsctp6sk->pinet6 = &newsctp6sk->inet6;
...
@@ -530,10 +535,28 @@ struct sock *sctp_v6_create_accept_sk(struct sock *sk,
    memcpy(newnp, np, sizeof(struct ipv6_pinfo));

-    ipv6_addr_copy(&newnp->daddr, &asoc->peer.primary_addr.v6.sin6_addr);
-
+    /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
+     * and getpeername().
+     */
    newinet->sport = inet->sport;
-    newinet->dport = asoc->peer.port;
    newnp->saddr = np->saddr;
    newnp->rcv_saddr = np->rcv_saddr;
+    newinet->dport = htons(asoc->peer.port);
+    newnp->daddr = asoc->peer.primary_addr.v6.sin6_addr;
+
+    /* Init the ipv4 part of the socket since we can have sockets
+     * using v6 API for ipv4.
+     */
+    newinet->ttl = sysctl_ip_default_ttl;
+    newinet->mc_loop = 1;
+    newinet->mc_ttl = 1;
+    newinet->mc_index = 0;
+    newinet->mc_list = NULL;
+
+    if (ipv4_config.no_pmtu_disc)
+        newinet->pmtudisc = IP_PMTUDISC_DONT;
+    else
+        newinet->pmtudisc = IP_PMTUDISC_WANT;
+
 #ifdef INET_REFCNT_DEBUG
    atomic_inc(&inet6_sock_nr);
...
@@ -556,6 +579,12 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
    return opt->iif;
}

+/* Was this packet marked by Explicit Congestion Notification? */
+static int sctp_v6_is_ce(const struct sk_buff *skb)
+{
+    return *((__u32 *)(skb->nh.ipv6h)) & htonl(1 << 20);
+}
+
/* Initialize a PF_INET6 socket msg_name. */
static void sctp_inet6_msgname(char *msgname, int *addr_len)
{
...
@@ -569,7 +598,7 @@ static void sctp_inet6_msgname(char *msgname, int *addr_len)
}

/* Initialize a PF_INET msgname from a ulpevent. */
static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
                                     char *msgname, int *addrlen)
{
    struct sockaddr_in6 *sin6, *sin6from;
...
@@ -596,7 +625,7 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
            sin6from = &event->asoc->peer.primary_addr.v6;
            ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr);
            if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                sin6->sin6_scope_id = sin6from->sin6_scope_id;
        }
    }
...
@@ -696,7 +725,7 @@ static int sctp_inet6_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
            if (addr->v6.sin6_scope_id)
                sk->bound_dev_if = addr->v6.sin6_scope_id;
            if (!sk->bound_dev_if)
                return 0;
        }
        af = opt->pf->af;
    }
...
@@ -726,11 +755,11 @@ static int sctp_inet6_send_verify(struct sctp_opt *opt, union sctp_addr *addr)
            if (addr->v6.sin6_scope_id)
                sk->bound_dev_if = addr->v6.sin6_scope_id;
            if (!sk->bound_dev_if)
                return 0;
        }
        af = opt->pf->af;
    }
    return af != NULL;
}
...
@@ -807,7 +836,8 @@ static struct sctp_af sctp_ipv6_specific = {
    .copy_addrlist  = sctp_v6_copy_addrlist,
    .from_skb       = sctp_v6_from_skb,
    .from_sk        = sctp_v6_from_sk,
-    .to_sk          = sctp_v6_to_sk,
+    .to_sk_saddr    = sctp_v6_to_sk_saddr,
+    .to_sk_daddr    = sctp_v6_to_sk_daddr,
    .dst_saddr      = sctp_v6_dst_saddr,
    .cmp_addr       = sctp_v6_cmp_addr,
    .scope          = sctp_v6_scope,
...
@@ -816,6 +846,7 @@ static struct sctp_af sctp_ipv6_specific = {
    .is_any         = sctp_v6_is_any,
    .available      = sctp_v6_available,
    .skb_iif        = sctp_v6_skb_iif,
+    .is_ce          = sctp_v6_is_ce,
    .net_header_len = sizeof(struct ipv6hdr),
    .sockaddr_len   = sizeof(struct sockaddr_in6),
    .sa_family      = AF_INET6,
...
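The new per-family is_ce callbacks above report whether the arriving IP header carries the ECN Congestion Experienced mark; for IPv6 the test masks bit 20 of the first 32-bit header word, i.e. the low ECN bit of the traffic class. Below is a minimal user-space sketch of that bit test, assuming nothing beyond the standard header layout; it is an illustration, not kernel code.

    /*
     * Sketch (not kernel code): version (4 bits), traffic class (8 bits)
     * and flow label (20 bits) share the first big-endian word of an
     * IPv6 header, so the two ECN bits of the traffic class land at
     * bits 21 and 20; bit 20 is what sctp_v6_is_ce() masks with
     * htonl(1 << 20).
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    static int ipv6_word_is_ce(uint32_t first_word_net_order)
    {
        return (first_word_net_order & htonl(1 << 20)) != 0;
    }

    int main(void)
    {
        /* version 6, traffic class 0x03 (ECN field = CE), flow label 0 */
        uint32_t w = htonl((6u << 28) | (0x03u << 20));

        printf("CE marked: %d\n", ipv6_word_is_ce(w)); /* prints 1 */
        return 0;
    }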
net/sctp/objcnt.c
...
@@ -55,6 +55,7 @@ SCTP_DBG_OBJCNT(bind_addr);
SCTP_DBG_OBJCNT(chunk);
SCTP_DBG_OBJCNT(addr);
SCTP_DBG_OBJCNT(ssnmap);
+SCTP_DBG_OBJCNT(datamsg);

/* An array to make it easy to pretty print the debug information
 * to the proc fs.
...
@@ -68,6 +69,7 @@ sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = {
    SCTP_DBG_OBJCNT_ENTRY(bind_addr),
    SCTP_DBG_OBJCNT_ENTRY(addr),
    SCTP_DBG_OBJCNT_ENTRY(ssnmap),
+    SCTP_DBG_OBJCNT_ENTRY(datamsg),
};

/* Callback from procfs to read out objcount information.
...
net/sctp/output.c
...
@@ -114,7 +114,7 @@ void sctp_packet_free(struct sctp_packet *packet)
    struct sctp_chunk *chunk;

    while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)))
-        sctp_free_chunk(chunk);
+        sctp_chunk_free(chunk);

    if (packet->malloced)
        kfree(packet);
...
@@ -166,11 +166,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
    /* If sending DATA and haven't aleady bundled a SACK, try to
     * bundle one in to the packet.
     */
    if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
        !pkt->has_cookie_echo) {
        struct sctp_association *asoc;
        asoc = pkt->transport->asoc;

        if (asoc->a_rwnd > asoc->rwnd) {
            struct sctp_chunk *sack;
            asoc->a_rwnd = asoc->rwnd;
...
@@ -205,7 +205,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
    if (retval != SCTP_XMIT_OK)
        goto finish;

    pmtu = ((packet->transport->asoc) ?
            (packet->transport->asoc->pmtu) :
            (packet->transport->pmtu));
...
@@ -219,21 +219,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
        /* Both control chunks and data chunks with TSNs are
         * non-fragmentable.
         */
-        int fragmentable = sctp_chunk_is_data(chunk) && (!chunk->has_tsn);
        if (packet_empty) {
-            if (fragmentable) {
-                retval = SCTP_XMIT_MUST_FRAG;
-                goto finish;
-            } else {
-                /* The packet is too big but we can
-                 * not fragment it--we have to just
-                 * transmit and rely on IP
-                 * fragmentation.
-                 */
-                packet->ipfragok = 1;
-                goto append;
-            }
+            /* We no longer do refragmentation at all.
+             * Just fragment at the IP layer, if we
+             * actually hit this condition
+             */
+            packet->ipfragok = 1;
+            goto append;
        } else { /* !packet_empty */
            retval = SCTP_XMIT_PMTU_FULL;
            goto finish;
...
@@ -259,7 +254,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
        goto finish;
    } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type)
        packet->has_cookie_echo = 1;
    else if (SCTP_CID_SACK == chunk->chunk_hdr->type)
        packet->has_sack = 1;

    /* It is OK to send this chunk. */
...
@@ -276,8 +271,8 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 */
int sctp_packet_transmit(struct sctp_packet *packet)
{
-    struct sctp_transport *transport = packet->transport;
-    struct sctp_association *asoc = transport->asoc;
+    struct sctp_transport *tp = packet->transport;
+    struct sctp_association *asoc = tp->asoc;
    struct sctphdr *sh;
    __u32 crc32;
    struct sk_buff *nskb;
...
@@ -311,6 +306,31 @@ int sctp_packet_transmit(struct sctp_packet *packet)
     */
    skb_set_owner_w(nskb, sk);

+    /* Build the SCTP header. */
+    sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
+    sh->source = htons(packet->source_port);
+    sh->dest   = htons(packet->destination_port);
+
+    /* From 6.8 Adler-32 Checksum Calculation:
+     * After the packet is constructed (containing the SCTP common
+     * header and one or more control or DATA chunks), the
+     * transmitter shall:
+     *
+     * 1) Fill in the proper Verification Tag in the SCTP common
+     *    header and initialize the checksum field to 0's.
+     */
+    sh->vtag     = htonl(packet->vtag);
+    sh->checksum = 0;
+
+    /* 2) Calculate the Adler-32 checksum of the whole packet,
+     *    including the SCTP common header and all the
+     *    chunks.
+     *
+     * Note: Adler-32 is no longer applicable, as has been replaced
+     * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
+     */
+    crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr));
+
    /**
     * 6.10 Bundling
     *
...
@@ -332,10 +352,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
     */
    SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
    while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
-        chunk->num_times_sent++;
-        chunk->sent_at = jiffies;
        if (sctp_chunk_is_data(chunk)) {
-            sctp_chunk_assign_tsn(chunk);
+            if (!chunk->has_tsn) {
+                sctp_chunk_assign_ssn(chunk);
+                sctp_chunk_assign_tsn(chunk);

            /* 6.3.1 C4) When data is in flight and when allowed
             * by rule C5, a new RTT measurement MUST be made each
...
@@ -343,19 +365,27 @@ int sctp_packet_transmit(struct sctp_packet *packet)
             * SHOULD be made no more than once per round-trip
             * for a given destination transport address.
             */
-            if ((1 == chunk->num_times_sent) &&
-                (!transport->rto_pending)) {
-                chunk->rtt_in_progress = 1;
-                transport->rto_pending = 1;
-            }
+                if (!tp->rto_pending) {
+                    chunk->rtt_in_progress = 1;
+                    tp->rto_pending = 1;
+                }
+            } else
+                chunk->resent = 1;
+
+            chunk->sent_at = jiffies;
            has_data = 1;
        }

-        memcpy(skb_put(nskb, chunk->skb->len),
-               chunk->skb->data, chunk->skb->len);
        padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
-        memset(skb_put(nskb, padding), 0, padding);
-        SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d, "
-                          "%s %d\n",
+        if (padding)
+            memset(skb_put(chunk->skb, padding), 0, padding);
+
+        crc32 = sctp_update_copy_cksum(skb_put(nskb, chunk->skb->len),
+                                       chunk->skb->data,
+                                       chunk->skb->len, crc32);
+
+        SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n",
                          "*** Chunk", chunk,
                          sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
...
@@ -364,7 +394,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
                          ntohl(chunk->subh.data_hdr->tsn) : 0,
                          "length", ntohs(chunk->chunk_hdr->length),
                          "chunk->skb->len", chunk->skb->len,
-                          "num_times_sent", chunk->num_times_sent,
                          "rtt_in_progress", chunk->rtt_in_progress);

        /*
...
@@ -373,33 +402,10 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * acknowledged or have failed.
         */
        if (!sctp_chunk_is_data(chunk))
-            sctp_free_chunk(chunk);
+            sctp_chunk_free(chunk);
    }

-    /* Build the SCTP header. */
-    sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
-    sh->source = htons(packet->source_port);
-    sh->dest   = htons(packet->destination_port);
-
-    /* From 6.8 Adler-32 Checksum Calculation:
-     * After the packet is constructed (containing the SCTP common
-     * header and one or more control or DATA chunks), the
-     * transmitter shall:
-     *
-     * 1) Fill in the proper Verification Tag in the SCTP common
-     *    header and initialize the checksum field to 0's.
-     */
-    sh->vtag     = htonl(packet->vtag);
-    sh->checksum = 0;
-
-    /* 2) Calculate the Adler-32 checksum of the whole packet,
-     *    including the SCTP common header and all the
-     *    chunks.
-     *
-     * Note: Adler-32 is no longer applicable, as has been replaced
-     * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
-     */
-    crc32 = sctp_start_cksum((__u8 *)sh, nskb->len);
-
    /* Perform final transformation on checksum. */
    crc32 = sctp_end_cksum(crc32);

    /* 3) Put the resultant value into the checksum field in the
...
@@ -413,17 +419,13 @@ int sctp_packet_transmit(struct sctp_packet *packet)
     * data sender to indicate that the end-points of the
     * transport protocol are ECN-capable."
     *
-     * If ECN capable && negotiated && it makes sense for
-     * this packet to support it (e.g. post ECN negotiation)
-     * then lets set the ECT bit
+     * Now setting the ECT bit all the time, as it should not cause
+     * any problems protocol-wise even if our peer ignores it.
     *
-     * FIXME: Need to do something else for IPv6
+     * Note: The works for IPv6 layer checks this bit too later
+     * in transmission. See IP6_ECN_flow_xmit().
     */
-    if (packet->ecn_capable) {
-        INET_ECN_xmit(nskb->sk);
-    } else {
-        INET_ECN_dontxmit(nskb->sk);
-    }
+    INET_ECN_xmit(nskb->sk);

    /* Set up the IP options. */
    /* BUG: not implemented
...
@@ -431,22 +433,21 @@ int sctp_packet_transmit(struct sctp_packet *packet)
     */

    /* Dump that on IP! */
-    if (asoc && asoc->peer.last_sent_to != transport) {
+    if (asoc && asoc->peer.last_sent_to != tp) {
        /* Considering the multiple CPU scenario, this is a
         * "correcter" place for last_sent_to. --xguo
         */
-        asoc->peer.last_sent_to = transport;
+        asoc->peer.last_sent_to = tp;
    }

    if (has_data) {
        struct timer_list *timer;
        unsigned long timeout;

-        transport->last_time_used = jiffies;
+        tp->last_time_used = jiffies;

        /* Restart the AUTOCLOSE timer when sending data. */
-        if ((SCTP_STATE_ESTABLISHED == asoc->state) && (asoc->autoclose)) {
+        if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
            timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
            timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
...
@@ -455,21 +456,21 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        }
    }

-    dst = transport->dst;
+    dst = tp->dst;
    /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
    if (!dst || (dst->obsolete > 1)) {
        dst_release(dst);
-        sctp_transport_route(transport, NULL, sctp_sk(sk));
+        sctp_transport_route(tp, NULL, sctp_sk(sk));
        sctp_assoc_sync_pmtu(asoc);
    }

-    nskb->dst = dst_clone(transport->dst);
+    nskb->dst = dst_clone(tp->dst);
    if (!nskb->dst)
        goto no_route;

    SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb length %d\n",
                      nskb->len);
-    (*transport->af_specific->sctp_xmit)(nskb, transport, packet->ipfragok);
+    (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);

out:
    packet->size = SCTP_IP_OVERHEAD;
    return err;
...
@@ -596,8 +597,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
     * if any previously transmitted data on the connection remains
     * unacknowledged.
     */
-    if (!sp->nodelay && SCTP_IP_OVERHEAD == packet->size &&
-        q->outstanding_bytes && SCTP_STATE_ESTABLISHED == asoc->state) {
+    if (!sp->nodelay && SCTP_IP_OVERHEAD == packet->size &&
+        q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) {
        unsigned len = datasize + q->out_qlen;

        /* Check whether this chunk and all the rest of pending
...
@@ -623,6 +624,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
            rwnd = 0;

    asoc->peer.rwnd = rwnd;
+    /* Has been accepted for transmission. */
+    chunk->msg->can_expire = 0;
finish:
    return retval;
...
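The reworked transmit path above seeds a running CRC32-C over the common header with sctp_start_cksum(), folds each chunk into it with sctp_update_copy_cksum() while copying the chunk into the outgoing skb, and finishes with sctp_end_cksum(). The user-space sketch below shows only that start/update/end shape; the cksum_* helpers are toy stand-ins for the kernel functions, and the mixing function is deliberately not a real CRC32-C.

    /*
     * Sketch of the incremental checksum pattern used in
     * sctp_packet_transmit() above.  Toy checksum, illustrative only.
     */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static uint32_t cksum_start(const uint8_t *p, size_t len)
    {
        uint32_t sum = 0;
        while (len--)
            sum = sum * 31 + *p++;   /* toy mix, not CRC32-C */
        return sum;
    }

    /* Copy 'len' bytes to 'dst' and fold them into the running sum. */
    static uint32_t cksum_update_copy(uint8_t *dst, const uint8_t *src,
                                      size_t len, uint32_t sum)
    {
        memcpy(dst, src, len);
        while (len--)
            sum = sum * 31 + *src++;
        return sum;
    }

    static uint32_t cksum_end(uint32_t sum)
    {
        return ~sum;                 /* final transformation */
    }

    int main(void)
    {
        uint8_t header[12] = { 0x13, 0x88, 0x0b, 0xb8 }; /* toy header bytes */
        uint8_t chunk[8]   = { 0x00, 0x03, 0x00, 0x08 }; /* toy chunk bytes  */
        uint8_t wire[20];
        uint32_t sum;

        memcpy(wire, header, sizeof(header));
        sum = cksum_start(header, sizeof(header));
        sum = cksum_update_copy(wire + sizeof(header), chunk,
                                sizeof(chunk), sum);
        printf("checksum: 0x%08x\n", cksum_end(sum));
        return 0;
    }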
net/sctp/outqueue.c
...
@@ -55,13 +55,19 @@
#include <net/sctp/sctp.h>

/* Declare internal functions here. */
-static int sctp_acked(sctp_sackhdr_t *sack, __u32 tsn);
+static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
                                   struct list_head *transmitted_queue,
                                   struct sctp_transport *transport,
-                                   sctp_sackhdr_t *sack,
+                                   struct sctp_sackhdr *sack,
                                   __u32 highest_new_tsn);

+static void sctp_mark_missing(struct sctp_outq *q,
+                              struct list_head *transmitted_queue,
+                              struct sctp_transport *transport,
+                              __u32 highest_new_tsn,
+                              int count_of_newacks);
+
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
...
@@ -94,13 +100,105 @@ static inline void sctp_outq_insert_data(struct sctp_outq *q,
                                         struct sctp_chunk *ch,
                                         struct sctp_chunk *pos)
{
    __skb_insert((struct sk_buff *)ch, (struct sk_buff *)pos->prev,
                 (struct sk_buff *)pos, pos->list);
    q->out_qlen += ch->skb->len;
}

+/*
+ * SFR-CACC algorithm:
+ * D) If count_of_newacks is greater than or equal to 2
+ * and t was not sent to the current primary then the
+ * sender MUST NOT increment missing report count for t.
+ */
+static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
+                                       struct sctp_transport *transport,
+                                       int count_of_newacks)
+{
+    if (count_of_newacks >= 2 && transport != primary)
+        return 1;
+    return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * F) If count_of_newacks is less than 2, let d be the
+ * destination to which t was sent. If cacc_saw_newack
+ * is 0 for destination d, then the sender MUST NOT
+ * increment missing report count for t.
+ */
+static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
+                                       int count_of_newacks)
+{
+    if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack)
+        return 1;
+    return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
+ * execute steps C, D, F.
+ *
+ * C has been implemented in sctp_outq_sack
+ */
+static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
+                                     struct sctp_transport *transport,
+                                     int count_of_newacks)
+{
+    if (!primary->cacc.cycling_changeover) {
+        if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
+            return 1;
+        if (sctp_cacc_skip_3_1_f(transport, count_of_newacks));
+            return 1;
+        return 0;
+    }
+    return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
+ * than next_tsn_at_change of the current primary, then
+ * the sender MUST NOT increment missing report count
+ * for t.
+ */
+static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
+{
+    if (primary->cacc.cycling_changeover &&
+        TSN_lt(tsn, primary->cacc.next_tsn_at_change))
+        return 1;
+    return 0;
+}
+
+/*
+ * SFR-CACC algorithm:
+ * 3) If the missing report count for TSN t is to be
+ * incremented according to [RFC2960] and
+ * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
+ * then the sender MUST futher execute steps 3.1 and
+ * 3.2 to determine if the missing report count for
+ * TSN t SHOULD NOT be incremented.
+ *
+ * 3.3) If 3.1 and 3.2 do not dictate that the missing
+ * report count for t should not be incremented, then
+ * the sender SOULD increment missing report count for
+ * t (according to [RFC2960] and [SCTP_STEWART_2002]).
+ */
+static inline int sctp_cacc_skip(struct sctp_transport *primary,
+                                 struct sctp_transport *transport,
+                                 int count_of_newacks, __u32 tsn)
+{
+    if (primary->cacc.changeover_active &&
+        (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
+         sctp_cacc_skip_3_2(primary, tsn)))
+        return 1;
+    return 0;
+}
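The inline helpers above encode the SFR-CACC rules that decide whether a gap-acked SACK may bump a TSN's missing-report count after a primary-path changeover. The self-contained sketch below walks the same decision tree; the struct and values are illustrative stand-ins for the kernel's transport state, not the actual implementation.

    /*
     * Sketch of the SFR-CACC skip decision.  toy_transport is a
     * stripped-down stand-in for struct sctp_transport.
     */
    #include <stdio.h>

    struct toy_transport {
        int changeover_active;    /* meaningful on the primary */
        int cycling_changeover;
        int cacc_saw_newack;
        unsigned int next_tsn_at_change;
    };

    static int tsn_lt(unsigned int a, unsigned int b) { return (int)(a - b) < 0; }

    static int cacc_skip(const struct toy_transport *primary,
                         const struct toy_transport *transport,
                         int count_of_newacks, unsigned int tsn)
    {
        if (!primary->changeover_active)
            return 0;                        /* rule 3: only after changeover */
        if (!primary->cycling_changeover) {  /* rule 3.1: steps D and F */
            if (count_of_newacks >= 2 && transport != primary)
                return 1;                    /* D */
            if (count_of_newacks < 2 && !transport->cacc_saw_newack)
                return 1;                    /* F */
            return 0;
        }
        /* rule 3.2: during cycling changeover, old TSNs are not counted */
        return tsn_lt(tsn, primary->next_tsn_at_change);
    }

    int main(void)
    {
        struct toy_transport primary = { 1, 0, 0, 2000 };
        struct toy_transport alt     = { 0, 0, 0, 0 };

        /* Two new acks seen and the chunk went to the alternate path:
         * rule D says do not bump its missing report count.
         */
        printf("skip: %d\n", cacc_skip(&primary, &alt, 2, 1500)); /* 1 */
        return 0;
    }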
/* Generate a new outqueue. */
-struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
+struct sctp_outq *sctp_outq_new(struct sctp_association *asoc)
{
    struct sctp_outq *q;
...
@@ -116,7 +214,7 @@ struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
-void sctp_outq_init(sctp_association_t *asoc, struct sctp_outq *q)
+void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
    q->asoc = asoc;
    skb_queue_head_init(&q->out);
...
@@ -132,6 +230,7 @@ void sctp_outq_init(sctp_association_t *asoc, struct sctp_outq *q)
    q->outstanding_bytes = 0;
    q->empty = 1;
+    q->cork  = 0;

    q->malloced = 0;
    q->out_qlen = 0;
...
@@ -143,59 +242,51 @@ void sctp_outq_teardown(struct sctp_outq *q)
{
    struct sctp_transport *transport;
    struct list_head *lchunk, *pos, *temp;
-    sctp_chunk_t *chunk;
-    struct sctp_ulpevent *ev;
+    struct sctp_chunk *chunk;

    /* Throw away unacknowledged chunks. */
    list_for_each(pos, &q->asoc->peer.transport_addr_list) {
        transport = list_entry(pos, struct sctp_transport, transports);
        while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
-            chunk = list_entry(lchunk, sctp_chunk_t,
+            chunk = list_entry(lchunk, struct sctp_chunk,
                               transmitted_list);
-            /* Generate a SEND FAILED event. */
-            ev = sctp_ulpevent_make_send_failed(q->asoc, chunk,
-                         SCTP_DATA_SENT, q->error, GFP_ATOMIC);
-            if (ev)
-                sctp_ulpq_tail_event(&q->asoc->ulpq, ev);
-
-            sctp_free_chunk(chunk);
+            /* Mark as part of a failed message. */
+            sctp_datamsg_fail(chunk, q->error);
+            sctp_chunk_free(chunk);
        }
    }

    /* Throw away chunks that have been gap ACKed. */
    list_for_each_safe(lchunk, temp, &q->sacked) {
        list_del(lchunk);
-        chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
-        sctp_free_chunk(chunk);
+        chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
+        sctp_datamsg_fail(chunk, q->error);
+        sctp_chunk_free(chunk);
    }

    /* Throw away any chunks in the retransmit queue. */
    list_for_each_safe(lchunk, temp, &q->retransmit) {
        list_del(lchunk);
-        chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
-        sctp_free_chunk(chunk);
+        chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
+        sctp_datamsg_fail(chunk, q->error);
+        sctp_chunk_free(chunk);
    }

    /* Throw away any leftover data chunks. */
    while ((chunk = sctp_outq_dequeue_data(q))) {
-        /* Generate a SEND FAILED event. */
-        ev = sctp_ulpevent_make_send_failed(q->asoc, chunk,
-                     SCTP_DATA_UNSENT, q->error, GFP_ATOMIC);
-        if (ev)
-            sctp_ulpq_tail_event(&q->asoc->ulpq, ev);
-
-        sctp_free_chunk(chunk);
+        /* Mark as send failure. */
+        sctp_datamsg_fail(chunk, q->error);
+        sctp_chunk_free(chunk);
    }

    q->error = 0;

    /* Throw away any leftover control chunks. */
-    while ((chunk = (sctp_chunk_t *) skb_dequeue(&q->control)))
-        sctp_free_chunk(chunk);
+    while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)))
+        sctp_chunk_free(chunk);
}

/* Free the outqueue structure and any related pending chunks. */
...
@@ -210,7 +301,7 @@ void sctp_outq_free(struct sctp_outq *q)
}

/* Put a new chunk in an sctp_outq. */
-int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
+int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
    int error = 0;
...
@@ -265,7 +356,8 @@ int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
    if (error < 0)
        return error;

-    error = sctp_outq_flush(q, 0);
+    if (!q->cork)
+        error = sctp_outq_flush(q, 0);

    return error;
}
...
@@ -276,15 +368,16 @@ int sctp_outq_tail(struct sctp_outq *q, sctp_chunk_t *chunk)
void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
{
    struct list_head *rlchunk;
-    sctp_chunk_t *tchunk, *rchunk;
+    struct sctp_chunk *tchunk, *rchunk;
    __u32 ttsn, rtsn;
    int done = 0;

-    tchunk = list_entry(tlchunk, sctp_chunk_t, transmitted_list);
+    tchunk = list_entry(tlchunk, struct sctp_chunk, transmitted_list);
    ttsn = ntohl(tchunk->subh.data_hdr->tsn);

    list_for_each(rlchunk, &q->retransmit) {
-        rchunk = list_entry(rlchunk, sctp_chunk_t, transmitted_list);
+        rchunk = list_entry(rlchunk, struct sctp_chunk, transmitted_list);
        rtsn = ntohl(rchunk->subh.data_hdr->tsn);
        if (TSN_lt(ttsn, rtsn)) {
            list_add(tlchunk, rlchunk->prev);
...
@@ -303,11 +396,12 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                          __u8 fast_retransmit)
{
    struct list_head *lchunk, *ltemp;
-    sctp_chunk_t *chunk;
+    struct sctp_chunk *chunk;

    /* Walk through the specified transmitted queue. */
    list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
-        chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
+        chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);

        /* If we are doing retransmission due to a fast retransmit,
         * only the chunk's that are marked for fast retransmit
...
@@ -416,8 +510,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
    struct list_head *lchunk;
    struct sctp_transport *transport = pkt->transport;
    sctp_xmit_t status;
-    sctp_chunk_t *chunk;
-    sctp_association_t *asoc;
+    struct sctp_chunk *chunk;
+    struct sctp_association *asoc;
    int error = 0;

    asoc = q->asoc;
...
@@ -442,7 +536,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
    lchunk = sctp_list_dequeue(lqueue);

    while (lchunk) {
-        chunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
+        chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);

        /* Make sure that Gap Acked TSNs are not retransmitted. A
         * simple approach is just to move such TSNs out of the
...
@@ -504,215 +599,19 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
    return error;
}

-/* This routine either transmits the fragment or puts it on the output
- * queue. 'pos' points to the next chunk in the output queue after the
- * chunk that is currently in the process of fragmentation.
- */
-void sctp_xmit_frag(struct sctp_outq *q, struct sctp_chunk *pos,
-                    struct sctp_packet *packet,
-                    struct sctp_chunk *frag, __u32 tsn)
-{
-    struct sctp_transport *transport = packet->transport;
-    struct sk_buff_head *queue = &q->out;
-    sctp_xmit_t status;
-    int error;
-
-    frag->subh.data_hdr->tsn = htonl(tsn);
-    frag->has_tsn = 1;
-
-    /* An inner fragment may be smaller than the earlier one and may get
-     * in if we call q->build_output. This ensures that all the fragments
-     * are sent in order.
-     */
-    if (!skb_queue_empty(queue)) {
-        SCTP_DEBUG_PRINTK("sctp_xmit_frag: q not empty. "
-                          "adding 0x%x to outqueue\n",
-                          ntohl(frag->subh.data_hdr->tsn));
-        if (pos)
-            sctp_outq_insert_data(q, frag, pos);
-        else
-            sctp_outq_tail_data(q, frag);
-        return;
-    }
-
-    /* Add the chunk fragment to the packet. */
-    status = (*q->build_output)(packet, frag);
-    switch (status) {
-    case SCTP_XMIT_RWND_FULL:
-        /* RWND is full, so put the chunk in the output queue. */
-        SCTP_DEBUG_PRINTK("sctp_xmit_frag: rwnd full. "
-                          "adding 0x%x to outqueue\n",
-                          ntohl(frag->subh.data_hdr->tsn));
-        if (pos)
-            sctp_outq_insert_data(q, frag, pos);
-        else
-            sctp_outq_tail_data(q, frag);
-        break;
-
-    case SCTP_XMIT_OK:
-        error = (*q->force_output)(packet);
-        if (error < 0) {
-            /* Packet could not be transmitted, put the chunk in
-             * the output queue
-             */
-            SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
-                              "failed. adding 0x%x to outqueue\n",
-                              ntohl(frag->subh.data_hdr->tsn));
-            if (pos)
-                sctp_outq_insert_data(q, frag, pos);
-            else
-                sctp_outq_tail_data(q, frag);
-        } else {
-            SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
-                              "success. 0x%x sent\n",
-                              ntohl(frag->subh.data_hdr->tsn));
-            list_add_tail(&frag->transmitted_list,
-                          &transport->transmitted);
-            sctp_transport_reset_timers(transport);
-        }
-        break;
-
-    default:
-        BUG();
-    };
-}
-
-/* This routine calls sctp_xmit_frag() for all the fragments of a message.
- * The argument 'frag' point to the first fragment and it holds the list
- * of all the other fragments in the 'frag_list' field.
- */
-void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt,
-                                 sctp_chunk_t *frag)
-{
-    sctp_association_t *asoc = frag->asoc;
-    struct list_head *lfrag, *frag_list;
-    __u32 tsn;
-    int nfrags = 1;
-    struct sctp_chunk *pos;
-
-    /* Count the number of fragments. */
-    frag_list = &frag->frag_list;
-    list_for_each(lfrag, frag_list) {
-        nfrags++;
-    }
-
-    /* Get a TSN block of nfrags TSNs. */
-    tsn = sctp_association_get_tsn_block(asoc, nfrags);
-
-    pos = (struct sctp_chunk *)skb_peek(&q->out);
-    /* Transmit the first fragment. */
-    sctp_xmit_frag(q, pos, pkt, frag, tsn++);
-
-    /* Transmit the rest of fragments. */
-    frag_list = &frag->frag_list;
-    list_for_each(lfrag, frag_list) {
-        frag = list_entry(lfrag, sctp_chunk_t, frag_list);
-        sctp_xmit_frag(q, pos, pkt, frag, tsn++);
-    }
-}
-
-/* This routine breaks the given chunk into 'max_frag_data_len' size
- * fragments. It returns the first fragment with the frag_list field holding
- * the remaining fragments.
- */
-sctp_chunk_t *sctp_fragment_chunk(sctp_chunk_t *chunk,
-                                  size_t max_frag_data_len)
-{
-    sctp_association_t *asoc = chunk->asoc;
-    void *data_ptr = chunk->subh.data_hdr;
-    struct sctp_sndrcvinfo *sinfo = &chunk->sinfo;
-    __u16 chunk_data_len = sctp_data_size(chunk);
-    __u16 ssn = ntohs(chunk->subh.data_hdr->ssn);
-    sctp_chunk_t *first_frag, *frag;
-    struct list_head *frag_list;
-    int nfrags;
-    __u8 old_flags, flags;
-
-    /* nfrags = no. of max size fragments + any smaller last fragment. */
-    nfrags = ((chunk_data_len / max_frag_data_len) +
-              ((chunk_data_len % max_frag_data_len) ? 1 : 0));
-
-    /* Start of the data in the chunk. */
-    data_ptr += sizeof(sctp_datahdr_t);
-
-    /* Are we fragmenting an already fragmented large message? */
-    old_flags = chunk->chunk_hdr->flags;
-    if (old_flags & SCTP_DATA_FIRST_FRAG)
-        flags = SCTP_DATA_FIRST_FRAG;
-    else
-        flags = SCTP_DATA_MIDDLE_FRAG;
-
-    /* Make the first fragment. */
-    first_frag = sctp_make_datafrag(asoc, sinfo, max_frag_data_len,
-                                    data_ptr, flags, ssn);
-    if (!first_frag)
-        goto err;
-    first_frag->has_ssn = 1;
-
-    /* All the fragments are added to the frag_list of the first chunk. */
-    frag_list = &first_frag->frag_list;
-
-    chunk_data_len -= max_frag_data_len;
-    data_ptr += max_frag_data_len;
-
-    /* Make the middle fragments. */
-    while (chunk_data_len > max_frag_data_len) {
-        frag = sctp_make_datafrag(asoc, sinfo, max_frag_data_len,
-                                  data_ptr, SCTP_DATA_MIDDLE_FRAG, ssn);
-        if (!frag)
-            goto err;
-        frag->has_ssn = 1;
-        /* Add the middle fragment to the first fragment's
-         * frag_list.
-         */
-        list_add_tail(&frag->frag_list, frag_list);
-
-        chunk_data_len -= max_frag_data_len;
-        data_ptr += max_frag_data_len;
-    }
-
-    if (old_flags & SCTP_DATA_LAST_FRAG)
-        flags = SCTP_DATA_LAST_FRAG;
-    else
-        flags = SCTP_DATA_MIDDLE_FRAG;
-
-    /* Make the last fragment. */
-    frag = sctp_make_datafrag(asoc, sinfo, chunk_data_len, data_ptr,
-                              flags, ssn);
-    if (!frag)
-        goto err;
-    frag->has_ssn = 1;
-
-    /* Add the last fragment to the first fragment's frag_list. */
-    list_add_tail(&frag->frag_list, frag_list);
-
-    /* Free the original chunk. */
-    sctp_free_chunk(chunk);
-
-    return first_frag;
-
-err:
-    /* Free any fragments that are created before the failure. */
-    if (first_frag) {
-        struct list_head *flist, *lfrag;
-
-        /* Free all the fragments off the first one. */
-        flist = &first_frag->frag_list;
-        while (NULL != (lfrag = sctp_list_dequeue(flist))) {
-            frag = list_entry(lfrag, sctp_chunk_t, frag_list);
-            sctp_free_chunk(frag);
-        }
-
-        /* Free the first fragment. */
-        sctp_free_chunk(first_frag);
-    }
-
-    return NULL;
+/* Cork the outqueue so queued chunks are really queued. */
+int sctp_outq_uncork(struct sctp_outq *q)
+{
+    int error = 0;
+    if (q->cork) {
+        q->cork = 0;
+        error = sctp_outq_flush(q, 0);
+    }
+    return error;
}
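sctp_outq_uncork() above pairs with the new q->cork flag: while the queue is corked, sctp_outq_tail() only enqueues, and uncorking flushes the whole batch once. The toy sketch below shows that calling pattern with made-up queue state; sctp_outq_cork() does not exist in the diff, the caller simply sets the flag before queueing a batch of chunks.

    /* Sketch of the cork/uncork batching pattern, toy queue only. */
    #include <stdio.h>

    struct toy_outq {
        int cork;
        int queued;
        int flushed;
    };

    static void toy_flush(struct toy_outq *q)
    {
        q->flushed += q->queued;
        q->queued = 0;
    }

    static void toy_tail(struct toy_outq *q)
    {
        q->queued++;
        if (!q->cork)      /* mirrors: if (!q->cork) sctp_outq_flush(q, 0); */
            toy_flush(q);
    }

    static void toy_uncork(struct toy_outq *q)
    {
        if (q->cork) {
            q->cork = 0;
            toy_flush(q);  /* one flush for the whole batch */
        }
    }

    int main(void)
    {
        struct toy_outq q = { 0, 0, 0 };

        q.cork = 1;        /* cork: queue a multi-chunk message */
        toy_tail(&q);
        toy_tail(&q);
        toy_tail(&q);
        toy_uncork(&q);    /* single flush of all three chunks */

        printf("flushed %d chunks\n", q.flushed); /* 3 */
        return 0;
    }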
/*
- * sctp_outq_flush - Try to flush an outqueue.
+ * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
...
@@ -724,7 +623,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
    struct sctp_packet *packet;
    struct sctp_packet singleton;
-    sctp_association_t *asoc = q->asoc;
+    struct sctp_association *asoc = q->asoc;
    int ecn_capable = asoc->peer.ecn_capable;
    __u16 sport = asoc->base.bind_addr.port;
    __u16 dport = asoc->peer.port;
...
@@ -735,7 +634,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
    struct sk_buff_head *queue;
    struct sctp_transport *transport = NULL;
    struct sctp_transport *new_transport;
-    sctp_chunk_t *chunk;
+    struct sctp_chunk *chunk;
    sctp_xmit_t status;
    int error = 0;
    int start_timer = 0;
...
@@ -762,7 +661,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
    }

    queue = &q->control;
-    while ((chunk = (sctp_chunk_t *)skb_dequeue(queue))) {
+    while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) {
        /* Pick the right transport to use. */
        new_transport = chunk->transport;
...
@@ -902,32 +801,25 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
             */
            if (chunk->sinfo.sinfo_stream >=
                asoc->c.sinit_num_ostreams) {
-                struct sctp_ulpevent *ev;
-
-                /* Generate a SEND FAILED event. */
-                ev = sctp_ulpevent_make_send_failed(asoc,
-                        chunk, SCTP_DATA_UNSENT,
-                        SCTP_ERROR_INV_STRM, GFP_ATOMIC);
-                if (ev)
-                    sctp_ulpq_tail_event(&asoc->ulpq, ev);
-
-                /* Free the chunk. */
-                sctp_free_chunk(chunk);
+                /* Mark as s failed send. */
+                sctp_datamsg_fail(chunk, SCTP_ERROR_INV_STRM);
+                sctp_chunk_free(chunk);
                continue;
            }

-            /* Now do delayed assignment of SSN. This will
-             * probably change again when we start supporting
-             * large (> approximately 2^16) size messages.
-             */
-            sctp_chunk_assign_ssn(chunk);
+            /* Has this chunk expired? */
+            if (sctp_datamsg_expires(chunk)) {
+                sctp_datamsg_fail(chunk, 0);
+                sctp_chunk_free(chunk);
+                continue;
+            }

            /* If there is a specified transport, use it.
             * Otherwise, we want to use the active path.
             */
            new_transport = chunk->transport;
-            if (new_transport == NULL || !new_transport->active)
+            if (!new_transport || !new_transport->active)
                new_transport = asoc->peer.active_path;

            /* Change packets if necessary. */
...
@@ -979,26 +871,6 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                goto sctp_flush_out;
                break;

-            case SCTP_XMIT_MUST_FRAG:
-            {
-                sctp_chunk_t *frag;
-
-                frag = sctp_fragment_chunk(chunk,
-                        packet->transport->asoc->frag_point);
-                if (!frag) {
-                    /* We could not fragment due to out of
-                     * memory condition. Free the original
-                     * chunk and return ENOMEM.
-                     */
-                    sctp_free_chunk(chunk);
-                    error = -ENOMEM;
-                    return error;
-                }
-
-                sctp_xmit_fragmented_chunks(q, packet, frag);
-                goto sctp_flush_out;
-                break;
-            }
-
            case SCTP_XMIT_OK:
                break;
...
@@ -1077,8 +949,8 @@ int sctp_outq_set_output_handlers(struct sctp_outq *q,
}

/* Update unack_data based on the incoming SACK chunk */
-static void sctp_sack_update_unack_data(sctp_association_t *assoc,
-                                        sctp_sackhdr_t *sack)
+static void sctp_sack_update_unack_data(struct sctp_association *assoc,
+                                        struct sctp_sackhdr *sack)
{
    sctp_sack_variable_t *frags;
    __u16 unack_data;
...
@@ -1096,12 +968,12 @@ static void sctp_sack_update_unack_data(sctp_association_t *assoc,
}

/* Return the highest new tsn that is acknowledged by the given SACK chunk. */
-static __u32 sctp_highest_new_tsn(sctp_sackhdr_t *sack,
-                                  sctp_association_t *asoc)
+static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
+                                  struct sctp_association *asoc)
{
    struct list_head *ltransport, *lchunk;
    struct sctp_transport *transport;
-    sctp_chunk_t *chunk;
+    struct sctp_chunk *chunk;
    __u32 highest_new_tsn, tsn;
    struct list_head *transport_list = &asoc->peer.transport_addr_list;
...
@@ -1111,7 +983,7 @@ static __u32 sctp_highest_new_tsn(sctp_sackhdr_t *sack,
        transport = list_entry(ltransport, struct sctp_transport,
                               transports);
        list_for_each(lchunk, &transport->transmitted) {
-            chunk = list_entry(lchunk, sctp_chunk_t,
+            chunk = list_entry(lchunk, struct sctp_chunk,
                               transmitted_list);
            tsn = ntohl(chunk->subh.data_hdr->tsn);
...
@@ -1130,26 +1002,64 @@ static __u32 sctp_highest_new_tsn(sctp_sackhdr_t *sack,
 * Process the SACK against the outqueue. Mostly, this just frees
 * things off the transmitted queue.
 */
-int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
+int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
-    sctp_association_t *asoc = q->asoc;
+    struct sctp_association *asoc = q->asoc;
    struct sctp_transport *transport;
-    sctp_chunk_t *tchunk;
+    struct sctp_chunk *tchunk;
    struct list_head *lchunk, *transport_list, *pos;
    sctp_sack_variable_t *frags = sack->variable;
    __u32 sack_ctsn, ctsn, tsn;
    __u32 highest_tsn, highest_new_tsn;
    __u32 sack_a_rwnd;
    int outstanding;
+    struct sctp_transport *primary = asoc->peer.primary_path;
+    int count_of_newacks = 0;

    /* Grab the association's destination address list. */
    transport_list = &asoc->peer.transport_addr_list;

    sack_ctsn = ntohl(sack->cum_tsn_ack);

+    /*
+     * SFR-CACC algorithm:
+     * On receipt of a SACK the sender SHOULD execute the
+     * following statements.
+     *
+     * 1) If the cumulative ack in the SACK passes next tsn_at_change
+     * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
+     * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
+     * all destinations.
+     */
+    if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
+        primary->cacc.changeover_active = 0;
+        list_for_each(pos, transport_list) {
+            transport = list_entry(pos, struct sctp_transport,
+                                   transports);
+            transport->cacc.cycling_changeover = 0;
+        }
+    }
+
+    /*
+     * SFR-CACC algorithm:
+     * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
+     * is set the receiver of the SACK MUST take the following actions:
+     *
+     * A) Initialize the cacc_saw_newack to 0 for all destination
+     * addresses.
+     */
+    if (sack->num_gap_ack_blocks > 0 &&
+        primary->cacc.changeover_active) {
+        list_for_each(pos, transport_list) {
+            transport = list_entry(pos, struct sctp_transport,
+                                   transports);
+            transport->cacc.cacc_saw_newack = 0;
+        }
+    }
+
    /* Get the highest TSN in the sack. */
    highest_tsn = sack_ctsn +
        ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end);

    if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
        highest_new_tsn = highest_tsn;
...
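As computed just above, the highest TSN covered by a SACK is the cumulative TSN ack plus the end offset stored in the last gap-ack block. A short worked example with made-up numbers:

    /* Worked example of the highest-TSN computation; values are made up. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int cum_tsn_ack = 1000;   /* SACK cumulative TSN ack      */
        unsigned short last_gap_end = 7;   /* end offset of last gap block */

        /* TSNs 1001..1007 were gap-acked, so the highest TSN seen is: */
        unsigned int highest_tsn = cum_tsn_ack + last_gap_end;

        printf("highest TSN in SACK: %u\n", highest_tsn); /* 1007 */
        return 0;
    }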
@@ -1162,6 +1072,7 @@ int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
     * and free those chunks that we can.
     */
    sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn);
+    sctp_mark_missing(q, &q->retransmit, NULL, highest_new_tsn, 0);

    /* Run through the transmitted queue.
     * Credit bytes received and free those chunks which we can.
...
@@ -1173,6 +1084,20 @@ int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
                               transports);
        sctp_check_transmitted(q, &transport->transmitted,
                               transport, sack, highest_new_tsn);
+        /*
+         * SFR-CACC algorithm:
+         * C) Let count_of_newacks be the number of
+         * destinations for which cacc_saw_newack is set.
+         */
+        if (transport->cacc.cacc_saw_newack)
+            count_of_newacks++;
+    }
+
+    list_for_each(pos, transport_list) {
+        transport = list_entry(pos, struct sctp_transport,
+                               transports);
+        sctp_mark_missing(q, &transport->transmitted, transport,
+                          highest_new_tsn, count_of_newacks);
    }

    /* Move the Cumulative TSN Ack Point if appropriate. */
...
@@ -1191,11 +1116,12 @@ int sctp_outq_sack(struct sctp_outq *q, sctp_sackhdr_t *sack)
    /* Throw away stuff rotting on the sack queue. */
    list_for_each(lchunk, &q->sacked) {
-        tchunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
+        tchunk = list_entry(lchunk, struct sctp_chunk,
+                            transmitted_list);
        tsn = ntohl(tchunk->subh.data_hdr->tsn);
        if (TSN_lte(tsn, ctsn)) {
            lchunk = lchunk->prev;
-            sctp_free_chunk(tchunk);
+            sctp_chunk_free(tchunk);
        }
    }
...
@@ -1244,12 +1170,9 @@ int sctp_outq_is_empty(const struct sctp_outq *q)
 * 2nd Level Abstractions
 ********************************************************************/

-/* Go through a transport's transmitted list or the assocication's retransmit
+/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
- * The retransmit list will not have an associated transport. In case of a
- * transmitted list with a transport, the transport's congestion, rto and fast
- * retransmit parameters are also updated and if needed a fast retransmit
- * process is started.
+ * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output. --xguo
 *
...
@@ -1260,17 +1183,16 @@ int sctp_outq_is_empty(const struct sctp_outq *q)
static void sctp_check_transmitted(struct sctp_outq *q,
                                   struct list_head *transmitted_queue,
                                   struct sctp_transport *transport,
-                                   sctp_sackhdr_t *sack,
+                                   struct sctp_sackhdr *sack,
                                   __u32 highest_new_tsn_in_sack)
{
    struct list_head *lchunk;
-    sctp_chunk_t *tchunk;
+    struct sctp_chunk *tchunk;
    struct list_head tlist;
    __u32 tsn;
    __u32 sack_ctsn;
    __u32 rtt;
    __u8 restart_timer = 0;
-    __u8 do_fast_retransmit = 0;
    int bytes_acked = 0;

    /* These state variables are for coherent debug output. --xguo */
...
@@ -1294,7 +1216,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
    /* The while loop will skip empty transmitted queues. */
    while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
-        tchunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
+        tchunk = list_entry(lchunk, struct sctp_chunk,
+                            transmitted_list);
        tsn = ntohl(tchunk->subh.data_hdr->tsn);

        if (sctp_acked(sack, tsn)) {
...
@@ -1315,9 +1238,9 @@ static void sctp_check_transmitted(struct sctp_outq *q,
             * first instance of the packet or a later
             * instance).
             */
-            if ((!tchunk->tsn_gap_acked) &&
-                (1 == tchunk->num_times_sent) &&
-                (tchunk->rtt_in_progress)) {
+            if (!tchunk->tsn_gap_acked &&
+                !tchunk->resent &&
+                tchunk->rtt_in_progress) {
                rtt = jiffies - tchunk->sent_at;
                sctp_transport_update_rto(transport, rtt);
...
@@ -1338,6 +1261,25 @@ static void sctp_check_transmitted(struct sctp_outq *q,
            if (!tchunk->tsn_gap_acked) {
                tchunk->tsn_gap_acked = 1;
                bytes_acked += sctp_data_size(tchunk);
+                /*
+                 * SFR-CACC algorithm:
+                 * 2) If the SACK contains gap acks
+                 * and the flag CHANGEOVER_ACTIVE is
+                 * set the receiver of the SACK MUST
+                 * take the following action:
+                 *
+                 * B) For each TSN t being acked that
+                 * has not been acked in any SACK so
+                 * far, set cacc_saw_newack to 1 for
+                 * the destination that the TSN was
+                 * sent to.
+                 */
+                if (transport &&
+                    sack->num_gap_ack_blocks &&
+                    q->asoc->peer.primary_path->cacc.changeover_active)
+                    transport->cacc.cacc_saw_newack = 1;
            }

            list_add_tail(&tchunk->transmitted_list,
...
@@ -1524,8 +1466,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
         * receiver's advertised window is zero, and there is
         * only one data chunk in flight to the receiver.
         */
-        if ((0 == q->asoc->peer.rwnd) &&
-            (!list_empty(&tlist)) &&
+        if (!q->asoc->peer.rwnd &&
+            !list_empty(&tlist) &&
            (sack_ctsn + 2 == q->asoc->next_tsn)) {
            SCTP_DEBUG_PRINTK("%s: SACK received for zero "
                              "window probe: %u\n",
...
@@ -1553,12 +1495,26 @@ static void sctp_check_transmitted(struct sctp_outq *q,
        }
    }

    /* Reconstruct the transmitted list with chunks that are not yet
     * acked by the Cumulative TSN Ack.
     */
-    while (NULL != (lchunk = sctp_list_dequeue(&tlist))) {
-        tchunk = list_entry(lchunk, sctp_chunk_t, transmitted_list);
-        tsn = ntohl(tchunk->subh.data_hdr->tsn);
+    list_splice(&tlist, transmitted_queue);
+}
+
+/* Mark chunks as missing and consequently may get retransmitted. */
+static void sctp_mark_missing(struct sctp_outq *q,
+                              struct list_head *transmitted_queue,
+                              struct sctp_transport *transport,
+                              __u32 highest_new_tsn_in_sack,
+                              int count_of_newacks)
+{
+    struct sctp_chunk *chunk;
+    struct list_head *pos;
+    __u32 tsn;
+    char do_fast_retransmit = 0;
+    struct sctp_transport *primary = q->asoc->peer.primary_path;
+
+    list_for_each(pos, transmitted_queue) {
+
+        chunk = list_entry(pos, struct sctp_chunk, transmitted_list);
+        tsn = ntohl(chunk->subh.data_hdr->tsn);

        /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
         * 'Unacknowledged TSN's', if the TSN number of an
...
@@ -1566,26 +1522,35 @@ static void sctp_check_transmitted(struct sctp_outq *q,
         * value, increment the 'TSN.Missing.Report' count on that
         * chunk if it has NOT been fast retransmitted or marked for
         * fast retransmit already.
         */
-        if ((!tchunk->fast_retransmit) &&
-            (!tchunk->tsn_gap_acked) &&
-            (TSN_lt(tsn, highest_new_tsn_in_sack))) {
-            tchunk->tsn_missing_report++;
-            SCTP_DEBUG_PRINTK("%s: TSN 0x%x missing counter: %d\n",
-                              __FUNCTION__, tsn,
-                              tchunk->tsn_missing_report);
-        }
+        if (!chunk->fast_retransmit &&
+            !chunk->tsn_gap_acked &&
+            TSN_lt(tsn, highest_new_tsn_in_sack)) {
+
+            /* SFR-CACC may require us to skip marking
+             * this chunk as missing.
+             */
+            if (!transport || !sctp_cacc_skip(primary, transport,
+                                              count_of_newacks, tsn)) {
+                chunk->tsn_missing_report++;
+
+                SCTP_DEBUG_PRINTK("%s: TSN 0x%x missing counter: %d\n",
+                                  __FUNCTION__, tsn,
+                                  chunk->tsn_missing_report);
+            }
+        }
        /*
         * M4) If any DATA chunk is found to have a
         * 'TSN.Missing.Report'
         * value larger than or equal to 4, mark that chunk for
         * retransmission and start the fast retransmit procedure.
         */
-        if (tchunk->tsn_missing_report >= 4) {
-            tchunk->fast_retransmit = 1;
+        if (chunk->tsn_missing_report >= 4) {
+            chunk->fast_retransmit = 1;
            do_fast_retransmit = 1;
        }
-
-        list_add_tail(lchunk, transmitted_queue);
    }

    if (transport) {
...
@@ -1601,7 +1566,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
}

/* Is the given TSN acked by this packet? */
-static int sctp_acked(sctp_sackhdr_t *sack, __u32 tsn)
+static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
    int i;
    sctp_sack_variable_t *frags;
...
net/sctp/primitive.c
...
@@ -55,12 +55,12 @@
#define DECLARE_PRIMITIVE(name) \
/* This is called in the code as sctp_primitive_ ## name.  */ \
-int sctp_primitive_ ## name(sctp_association_t *asoc, \
+int sctp_primitive_ ## name(struct sctp_association *asoc, \
			    void *arg) { \
	int error = 0; \
	sctp_event_t event_type; sctp_subtype_t subtype; \
	sctp_state_t state; \
-	sctp_endpoint_t *ep; \
+	struct sctp_endpoint *ep; \
	\
	event_type = SCTP_EVENT_T_PRIMITIVE; \
	subtype = SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_ ## name); \
...
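DECLARE_PRIMITIVE stamps out one sctp_primitive_NAME() entry point per ULP primitive by token pasting. The sketch below reproduces only that ## pattern with toy types and names; it is illustrative and does not use the kernel's types or primitives.

    /* Sketch of the token-pasting macro pattern; toy types only. */
    #include <stdio.h>

    enum toy_primitive { TOY_PRIMITIVE_ASSOCIATE, TOY_PRIMITIVE_SHUTDOWN };

    #define DECLARE_TOY_PRIMITIVE(name)                              \
    static int toy_primitive_ ## name(void *asoc, void *arg)        \
    {                                                                \
        int subtype = TOY_PRIMITIVE_ ## name;                        \
        printf("primitive %s -> subtype %d\n", #name, subtype);     \
        (void)asoc; (void)arg;                                       \
        return 0;                                                    \
    }

    DECLARE_TOY_PRIMITIVE(ASSOCIATE)
    DECLARE_TOY_PRIMITIVE(SHUTDOWN)

    int main(void)
    {
        toy_primitive_ASSOCIATE(NULL, NULL);
        toy_primitive_SHUTDOWN(NULL, NULL);
        return 0;
    }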
net/sctp/protocol.c
...
@@ -56,6 +56,7 @@
#include <net/sctp/sctp.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
+#include <net/inet_ecn.h>

/* Global data structures. */
struct sctp_protocol sctp_proto;
...
@@ -203,7 +204,7 @@ static void sctp_free_local_addr_list(struct sctp_protocol *proto)
/* Copy the local addresses which are valid for 'scope' into 'bp'. */
int sctp_copy_local_addr_list(struct sctp_protocol *proto,
                              struct sctp_bind_addr *bp, sctp_scope_t scope,
-                              int priority, int copy_flags)
+                              int gfp, int copy_flags)
{
    struct sockaddr_storage_list *addr;
    int error = 0;
...
@@ -223,8 +224,8 @@ int sctp_copy_local_addr_list(struct sctp_protocol *proto,
            (((AF_INET6 == addr->a.sa.sa_family) &&
              (copy_flags & SCTP_ADDR6_ALLOWED) &&
              (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
-            error = sctp_add_bind_addr(bp, &addr->a, priority);
+            error = sctp_add_bind_addr(bp, &addr->a, GFP_ATOMIC);
            if (error)
                goto end_copy;
        }
...
@@ -267,11 +268,16 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
}

/* Initialize sk->rcv_saddr from sctp_addr. */
-static void sctp_v4_to_sk(union sctp_addr *addr, struct sock *sk)
+static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
    inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr;
}

+/* Initialize sk->daddr from sctp_addr. */
+static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
+{
+    inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr;
+}
+
/* Initialize a sctp_addr from a dst_entry. */
static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst,
...
@@ -388,7 +394,7 @@ struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
{
    struct rtable *rt;
    struct flowi fl;
-    sctp_bind_addr_t *bp;
+    struct sctp_bind_addr *bp;
    rwlock_t *addr_lock;
    struct sockaddr_storage_list *laddr;
    struct list_head *pos;
...
@@ -471,21 +477,33 @@ struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
/* For v4, the source address is cached in the route entry(dst). So no need
 * to cache it separately and hence this is an empty routine.
 */
-void sctp_v4_get_saddr(sctp_association_t *asoc,
+void sctp_v4_get_saddr(struct sctp_association *asoc,
                       struct dst_entry *dst,
                       union sctp_addr *daddr,
                       union sctp_addr *saddr)
{
+    struct rtable *rt = (struct rtable *)dst;
+
+    if (rt) {
+        saddr->v4.sin_family = AF_INET;
+        saddr->v4.sin_port = asoc->base.bind_addr.port;
+        saddr->v4.sin_addr.s_addr = rt->rt_src;
+    }
}

/* What interface did this skb arrive on? */
-int sctp_v4_skb_iif(const struct sk_buff *skb)
+static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
    return ((struct rtable *)skb->dst)->rt_iif;
}

+/* Was this packet marked by Explicit Congestion Notification? */
+static int sctp_v4_is_ce(const struct sk_buff *skb)
+{
+    return INET_ECN_is_ce(skb->nh.iph->tos);
+}
+
/* Create and initialize a new sk for the socket returned by accept(). */
struct sock *sctp_v4_create_accept_sk(struct sock *sk,
                                      struct sctp_association *asoc)
{
...
@@ -506,22 +524,27 @@ struct sock *sctp_v4_create_accept_sk(struct sock *sk,
    newsk->prot = sk->prot;
    newsk->no_check = sk->no_check;
    newsk->reuse = sk->reuse;
+    newsk->shutdown = sk->shutdown;

    newsk->destruct = inet_sock_destruct;
    newsk->zapped = 0;
    newsk->family = PF_INET;
    newsk->protocol = IPPROTO_SCTP;
    newsk->backlog_rcv = sk->prot->backlog_rcv;

    newinet = inet_sk(newsk);

+    /* Initialize sk's sport, dport, rcv_saddr and daddr for
+     * getsockname() and getpeername()
+     */
    newinet->sport = inet->sport;
    newinet->saddr = inet->saddr;
-    newinet->rcv_saddr = inet->saddr;
-    newinet->dport = asoc->peer.port;
+    newinet->rcv_saddr = inet->rcv_saddr;
+    newinet->dport = htons(asoc->peer.port);
    newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
    newinet->pmtudisc = inet->pmtudisc;
    newinet->id = 0;
+
    newinet->ttl = sysctl_ip_default_ttl;
    newinet->mc_loop = 1;
    newinet->mc_ttl = 1;
...
@@ -568,7 +591,7 @@ int sctp_ctl_sock_init(void)
    if (sctp_get_pf_specific(PF_INET6))
        family = PF_INET6;
    else
        family = PF_INET;

    err = sock_create(family, SOCK_SEQPACKET, IPPROTO_SCTP,
...
@@ -695,7 +718,7 @@ static int sctp_inet_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
    return sctp_v4_available(addr);
}

/* Verify that sockaddr looks sendable. Common verification has already
 * been taken care of.
 */
static int sctp_inet_send_verify(struct sctp_opt *opt, union sctp_addr *addr)
...
@@ -706,7 +729,7 @@ static int sctp_inet_send_verify(struct sctp_opt *opt, union sctp_addr *addr)
/* Fill in Supported Address Type information for INIT and INIT-ACK
 * chunks. Returns number of addresses supported.
 */
static int sctp_inet_supported_addrs(const struct sctp_opt *opt,
                                     __u16 *types)
{
    types[0] = SCTP_PARAM_IPV4_ADDRESS;
...
@@ -805,7 +828,8 @@ struct sctp_af sctp_ipv4_specific = {
    .copy_addrlist  = sctp_v4_copy_addrlist,
    .from_skb       = sctp_v4_from_skb,
    .from_sk        = sctp_v4_from_sk,
-    .to_sk          = sctp_v4_to_sk,
+    .to_sk_saddr    = sctp_v4_to_sk_saddr,
+    .to_sk_daddr    = sctp_v4_to_sk_daddr,
    .dst_saddr      = sctp_v4_dst_saddr,
    .cmp_addr       = sctp_v4_cmp_addr,
    .addr_valid     = sctp_v4_addr_valid,
...
@@ -814,6 +838,7 @@ struct sctp_af sctp_ipv4_specific = {
    .available      = sctp_v4_available,
    .scope          = sctp_v4_scope,
    .skb_iif        = sctp_v4_skb_iif,
+    .is_ce          = sctp_v4_is_ce,
    .net_header_len = sizeof(struct iphdr),
    .sockaddr_len   = sizeof(struct sockaddr_in),
    .sa_family      = AF_INET,
...
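Both accept-socket constructors in this commit now store the peer port with htons(): asoc->peer.port is kept in host byte order, while the inet socket's dport field holds the wire-format value. A one-line illustration with made-up values:

    /* Illustration of the htons() conversion; values are made up. */
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        unsigned short peer_port = 7;            /* host byte order */
        unsigned short dport = htons(peer_port); /* network byte order */

        printf("host %u -> network 0x%04x\n", peer_port, dport);
        return 0;
    }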
net/sctp/sm_make_chunk.c
View file @
3e446c25
...
...
@@ -59,6 +59,8 @@
#include <linux/ipv6.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <linux/skbuff.h>
...
...
@@ -95,7 +97,7 @@ static const sctp_ecn_capable_param_t ecap_param = {
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
void
sctp_init_cause
(
s
ctp_chunk_t
*
chunk
,
__u16
cause_code
,
void
sctp_init_cause
(
s
truct
sctp_chunk
*
chunk
,
__u16
cause_code
,
const
void
*
payload
,
size_t
paylen
)
{
sctp_errhdr_t
err
;
...
...
@@ -156,14 +158,14 @@ void sctp_init_cause(sctp_chunk_t *chunk, __u16 cause_code,
* Host Name Address (Note 3) Optional 11
* Supported Address Types (Note 4) Optional 12
*/
s
ctp_chunk_t
*
sctp_make_init
(
const
sctp_association_t
*
asoc
,
const
s
ctp_bind_addr_t
*
bp
,
s
truct
sctp_chunk
*
sctp_make_init
(
const
struct
sctp_association
*
asoc
,
const
s
truct
sctp_bind_addr
*
bp
,
int
gfp
,
int
vparam_len
)
{
sctp_inithdr_t
init
;
union
sctp_params
addrs
;
size_t
chunksize
;
s
ctp_chunk_t
*
retval
=
NULL
;
s
truct
sctp_chunk
*
retval
=
NULL
;
int
num_types
,
addrs_len
=
0
;
struct
sctp_opt
*
sp
;
sctp_supported_addrs_param_t
sat
;
...
...
@@ -175,12 +177,9 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
* can be IPv4 and/or IPv6 in any combination.
*/
retval
=
NULL
;
addrs
.
v
=
NULL
;
/* Convert the provided bind address list to raw format */
/* Convert the provided bind address list to raw format
.
*/
addrs
=
sctp_bind_addrs_to_raw
(
bp
,
&
addrs_len
,
gfp
);
if
(
!
addrs
.
v
)
goto
nodata
;
init
.
init_tag
=
htonl
(
asoc
->
c
.
my_vtag
);
init
.
a_rwnd
=
htonl
(
asoc
->
rwnd
);
...
...
@@ -236,12 +235,12 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
	return retval;
}

-sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
-				 const sctp_chunk_t *chunk,
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+				      const struct sctp_chunk *chunk,
				 int gfp, int unkparam_len)
{
	sctp_inithdr_t initack;
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	union sctp_params addrs;
	int addrs_len;
	sctp_cookie_param_t *cookie;
	...
@@ -250,9 +249,8 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
	retval = NULL;

	/* Note: there may be no addresses to embed. */
	addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);
	if (!addrs.v)
		goto nomem_rawaddr;

	initack.init_tag	= htonl(asoc->c.my_vtag);
	initack.a_rwnd		= htonl(asoc->rwnd);
	...
@@ -294,7 +292,7 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
	sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);

	/* We need to remove the const qualifier at this point. */
-	retval->asoc = (sctp_association_t *) asoc;
+	retval->asoc = (struct sctp_association *) asoc;

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	...
@@ -311,8 +309,8 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
nomem_chunk:
	kfree(cookie);
nomem_cookie:
	kfree(addrs.v);
nomem_rawaddr:
	if (addrs.v)
		kfree(addrs.v);
	return retval;
}
	...
@@ -350,10 +348,10 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
 * An implementation SHOULD make the cookie as small as possible
 * to insure interoperability.
 */
-sctp_chunk_t *sctp_make_cookie_echo(const sctp_association_t *asoc,
-				    const sctp_chunk_t *chunk)
+struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
+					 const struct sctp_chunk *chunk)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	void *cookie;
	int cookie_len;
	...
@@ -401,10 +399,10 @@ sctp_chunk_t *sctp_make_cookie_echo(const sctp_association_t *asoc,
 *
 * Set to zero on transmit and ignored on receipt.
 */
-sctp_chunk_t *sctp_make_cookie_ack(const sctp_association_t *asoc,
-				   const sctp_chunk_t *chunk)
+struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
+					const struct sctp_chunk *chunk)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;

	retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
	...
@@ -446,11 +444,11 @@ sctp_chunk_t *sctp_make_cookie_ack(const sctp_association_t *asoc,
 *
 * Note: The CWR is considered a Control chunk.
 */
-sctp_chunk_t *sctp_make_cwr(const sctp_association_t *asoc,
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
			    const __u32 lowest_tsn,
-			    const sctp_chunk_t *chunk)
+			    const struct sctp_chunk *chunk)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	sctp_cwrhdr_t cwr;

	cwr.lowest_tsn = htonl(lowest_tsn);
	...
@@ -481,10 +479,10 @@ sctp_chunk_t *sctp_make_cwr(const sctp_association_t *asoc,
}

/* Make an ECNE chunk.  This is a congestion experienced report. */
-sctp_chunk_t *sctp_make_ecne(const sctp_association_t *asoc,
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
			     const __u32 lowest_tsn)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	sctp_ecnehdr_t ecne;

	ecne.lowest_tsn = htonl(lowest_tsn);
	...
@@ -502,25 +500,27 @@ sctp_chunk_t *sctp_make_ecne(const sctp_association_t *asoc,
/* Make a DATA chunk for the given association from the provided
 * parameters.  However, do not populate the data payload.
 */
-sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
+struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
				       const struct sctp_sndrcvinfo *sinfo,
				       int data_len, __u8 flags, __u16 ssn)
{
-	sctp_chunk_t *retval;
-	sctp_datahdr_t dp;
+	struct sctp_chunk *retval;
+	struct sctp_datahdr dp;
	int chunk_len;

	/* We assign the TSN as LATE as possible, not here when
	 * creating the chunk.
	 */
-	dp.tsn = 1000000;	/* This marker is a debugging aid. */
+	dp.tsn = 0;
	dp.stream = htons(sinfo->sinfo_stream);
	dp.ppid   = htonl(sinfo->sinfo_ppid);
-	dp.ssn    = htons(ssn);

	/* Set the flags for an unordered send. */
-	if (sinfo->sinfo_flags & MSG_UNORDERED)
+	if (sinfo->sinfo_flags & MSG_UNORDERED) {
		flags |= SCTP_DATA_UNORDERED;
+		dp.ssn = 0;
+	} else
+		dp.ssn = htons(ssn);

	chunk_len = sizeof(dp) + data_len;
	retval = sctp_make_chunk(asoc, SCTP_CID_DATA, flags, chunk_len);
	...
@@ -537,12 +537,12 @@ sctp_chunk_t *sctp_make_datafrag_empty(sctp_association_t *asoc,
/* Make a DATA chunk for the given association.  Populate the data
 * payload.
 */
-sctp_chunk_t *sctp_make_datafrag(sctp_association_t *asoc,
+struct sctp_chunk *sctp_make_datafrag(struct sctp_association *asoc,
				 const struct sctp_sndrcvinfo *sinfo,
				 int data_len, const __u8 *data,
				 __u8 flags, __u16 ssn)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;

	retval = sctp_make_datafrag_empty(asoc, sinfo, data_len, flags, ssn);
	if (retval)
	...
@@ -554,11 +554,11 @@ sctp_chunk_t *sctp_make_datafrag(sctp_association_t *asoc,
/* Make a DATA chunk for the given association to ride on stream id
 * 'stream', with a payload id of 'payload', and a body of 'data'.
 */
-sctp_chunk_t *sctp_make_data(sctp_association_t *asoc,
+struct sctp_chunk *sctp_make_data(struct sctp_association *asoc,
			     const struct sctp_sndrcvinfo *sinfo,
			     int data_len, const __u8 *data)
{
-	sctp_chunk_t *retval = NULL;
+	struct sctp_chunk *retval = NULL;

	retval = sctp_make_data_empty(asoc, sinfo, data_len);
	if (retval)
	...
@@ -571,7 +571,7 @@ sctp_chunk_t *sctp_make_data(sctp_association_t *asoc,
 * hold 'data_len' octets of data.  We use this version when we need
 * to build the message AFTER allocating memory.
 */
-sctp_chunk_t *sctp_make_data_empty(sctp_association_t *asoc,
+struct sctp_chunk *sctp_make_data_empty(struct sctp_association *asoc,
				   const struct sctp_sndrcvinfo *sinfo,
				   int data_len)
{
	...
@@ -584,9 +584,9 @@ sctp_chunk_t *sctp_make_data_empty(sctp_association_t *asoc,
 * association.  This reports on which TSN's we've seen to date,
 * including duplicates and gaps.
 */
-sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
+struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	sctp_sackhdr_t sack;
	sctp_gap_ack_block_t gab;
	int length;
	...
@@ -599,11 +599,13 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
	SCTP_DEBUG_PRINTK("sackCTSNAck sent is 0x%x.\n", ctsn);

	/* Count the number of Gap Ack Blocks.  */
-	sctp_tsnmap_iter_init(map, &iter);
-	for (num_gabs = 0;
-	     sctp_tsnmap_next_gap_ack(map, &iter, &gab.start, &gab.end);
-	     num_gabs++) {
-		/* Do nothing. */
-	}
+	num_gabs = 0;
+	if (sctp_tsnmap_has_gap(map)) {
+		sctp_tsnmap_iter_init(map, &iter);
+		while (sctp_tsnmap_next_gap_ack(map, &iter,
+						&gab.start, &gab.end))
+			num_gabs++;
+	}

	num_dup_tsns = sctp_tsnmap_num_dups(map);
	...
@@ -659,11 +661,15 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
	sctp_addto_chunk(retval, sizeof(sack), &sack);

	/* Put the Gap Ack Blocks into the chunk.  */
-	sctp_tsnmap_iter_init(map, &iter);
-	while (sctp_tsnmap_next_gap_ack(map, &iter, &gab.start, &gab.end)) {
-		gab.start = htons(gab.start);
-		gab.end = htons(gab.end);
-		sctp_addto_chunk(retval, sizeof(sctp_gap_ack_block_t), &gab);
+	if (num_gabs) {
+		sctp_tsnmap_iter_init(map, &iter);
+		while (sctp_tsnmap_next_gap_ack(map, &iter,
+						&gab.start, &gab.end)) {
+			gab.start = htons(gab.start);
+			gab.end = htons(gab.end);
+			sctp_addto_chunk(retval, sizeof(sctp_gap_ack_block_t),
+					 &gab);
+		}
	}

	/* Register the duplicates. */
	...
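Aside on the change above: a SACK reports the cumulative TSN ack point plus a list of Gap Ack Blocks, each describing a run of TSNs that arrived above that point; the new code only walks the TSN map when a gap actually exists. The following is a minimal user-space sketch of that counting pass over a simple received-TSN map. The map[], gab layout and function names are stand-ins invented for illustration, not the kernel's struct sctp_tsnmap.

    /* Count [start,end] runs of received TSNs above the cumulative ack point. */
    #include <stdio.h>

    struct gab { unsigned short start, end; };      /* 1-based offsets above ctsn */

    static int count_gap_ack_blocks(const unsigned char *map, int len,
                                    struct gab *gabs, int max_gabs)
    {
            int i, n = 0;

            for (i = 0; i < len && n < max_gabs; ) {
                    if (!map[i]) {          /* hole: TSN not yet received */
                            i++;
                            continue;
                    }
                    gabs[n].start = i + 1;
                    while (i < len && map[i])
                            i++;
                    gabs[n].end = i;        /* last received TSN of this run */
                    n++;
            }
            return n;
    }

    int main(void)
    {
            /* TSNs ctsn+1..ctsn+8, with two holes below the received runs. */
            unsigned char map[8] = { 0, 1, 1, 0, 0, 1, 0, 1 };
            struct gab gabs[4];
            int i, n = count_gap_ack_blocks(map, 8, gabs, 4);

            for (i = 0; i < n; i++)
                    printf("gap ack block %d: start=%u end=%u\n",
                           i, gabs[i].start, gabs[i].end);
            return 0;
    }

Running it prints blocks (2,3), (6,6) and (8,8), which is exactly what a SACK built from that map would carry.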
@@ -675,9 +681,9 @@ sctp_chunk_t *sctp_make_sack(const sctp_association_t *asoc)
}

/* Make a SHUTDOWN chunk. */
-sctp_chunk_t *sctp_make_shutdown(const sctp_association_t *asoc)
+struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	sctp_shutdownhdr_t shut;
	__u32 ctsn;
	...
@@ -695,10 +701,10 @@ sctp_chunk_t *sctp_make_shutdown(const sctp_association_t *asoc)
	return retval;
}

-sctp_chunk_t *sctp_make_shutdown_ack(const sctp_association_t *asoc,
-				     const sctp_chunk_t *chunk)
+struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
+					  const struct sctp_chunk *chunk)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;

	retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
	...
@@ -717,10 +723,11 @@ sctp_chunk_t *sctp_make_shutdown_ack(const sctp_association_t *asoc,
	return retval;
}

-sctp_chunk_t *sctp_make_shutdown_complete(const sctp_association_t *asoc,
-					  const sctp_chunk_t *chunk)
+struct sctp_chunk *sctp_make_shutdown_complete(
+	const struct sctp_association *asoc,
+	const struct sctp_chunk *chunk)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	__u8 flags = 0;

	/* Maybe set the T-bit if we have no association. */
	...
@@ -747,11 +754,11 @@ sctp_chunk_t *sctp_make_shutdown_complete(const sctp_association_t *asoc,
/* Create an ABORT.  Note that we set the T bit if we have no
 * association.
 */
-sctp_chunk_t *sctp_make_abort(const sctp_association_t *asoc,
-			      const sctp_chunk_t *chunk,
+struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
+				   const struct sctp_chunk *chunk,
			      const size_t hint)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	__u8 flags = 0;

	/* Maybe set the T-bit if we have no association. */
	...
@@ -775,10 +782,11 @@ sctp_chunk_t *sctp_make_abort(const sctp_association_t *asoc,
}

/* Helper to create ABORT with a NO_USER_DATA error. */
-sctp_chunk_t *sctp_make_abort_no_data(const sctp_association_t *asoc,
-				      const sctp_chunk_t *chunk, __u32 tsn)
+struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *asoc,
+					   const struct sctp_chunk *chunk,
+					   __u32 tsn)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	__u32 payload;

	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t)
	...
@@ -809,17 +817,22 @@ sctp_chunk_t *sctp_make_abort_no_data(const sctp_association_t *asoc,
}

/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
-sctp_chunk_t *sctp_make_abort_user(const sctp_association_t *asoc,
-				   const sctp_chunk_t *chunk,
+struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
+					const struct sctp_chunk *chunk,
				   const struct msghdr *msg)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	void *payload = NULL, *payoff;
-	size_t paylen;
-	struct iovec *iov = msg->msg_iov;
-	int iovlen = msg->msg_iovlen;
+	size_t paylen = 0;
+	struct iovec *iov = NULL;
+	int iovlen = 0;

-	paylen = get_user_iov_size(iov, iovlen);
+	if (msg) {
+		iov = msg->msg_iov;
+		iovlen = msg->msg_iovlen;
+		paylen = get_user_iov_size(iov, iovlen);
+	}

	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen);
	if (!retval)
		goto err_chunk;
	...
@@ -832,7 +845,7 @@ sctp_chunk_t *sctp_make_abort_user(const sctp_association_t *asoc,
	payoff = payload;

	for (; iovlen > 0; --iovlen) {
		if (copy_from_user(payoff, iov->iov_base, iov->iov_len))
			goto err_copy;
		payoff += iov->iov_len;
		iov++;
	...
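For context on the hunk above: an ABORT carrying a user-supplied reason has to know the total payload size up front (to size the error cause) and then flatten the user's iovec segments into that payload; get_user_iov_size() is the kernel helper that sums the segment lengths. Below is a small user-space analogue of the same two steps, with plain memcpy standing in for copy_from_user(); all names here are illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Sum the lengths of all iovec segments. */
    static size_t iov_total_len(const struct iovec *iov, int iovlen)
    {
            size_t total = 0;
            int i;

            for (i = 0; i < iovlen; i++)
                    total += iov[i].iov_len;
            return total;
    }

    int main(void)
    {
            char a[] = "user ", b[] = "abort reason";
            struct iovec iov[2] = {
                    { .iov_base = a, .iov_len = strlen(a) },
                    { .iov_base = b, .iov_len = strlen(b) },
            };
            size_t paylen = iov_total_len(iov, 2);
            char *payload = malloc(paylen + 1), *payoff = payload;
            int i;

            if (!payload)
                    return 1;
            for (i = 0; i < 2; i++) {       /* flatten the segments */
                    memcpy(payoff, iov[i].iov_base, iov[i].iov_len);
                    payoff += iov[i].iov_len;
            }
            payload[paylen] = '\0';
            printf("payload (%zu bytes): %s\n", paylen, payload);
            free(payload);
            return 0;
    }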
@@ -849,18 +862,18 @@ sctp_chunk_t *sctp_make_abort_user(const sctp_association_t *asoc,
err_copy:
	kfree(payload);
err_payload:
-	sctp_free_chunk(retval);
+	sctp_chunk_free(retval);
	retval = NULL;
err_chunk:
	return retval;
}

/* Make a HEARTBEAT chunk.  */
-sctp_chunk_t *sctp_make_heartbeat(const sctp_association_t *asoc,
+struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
				  const struct sctp_transport *transport,
				  const void *payload, const size_t paylen)
{
-	sctp_chunk_t *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT,
+	struct sctp_chunk *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT,
					       0, paylen);

	if (!retval)
	...
@@ -876,15 +889,16 @@ sctp_chunk_t *sctp_make_heartbeat(const sctp_association_t *asoc,
	return retval;
}

-sctp_chunk_t *sctp_make_heartbeat_ack(const sctp_association_t *asoc,
-				      const sctp_chunk_t *chunk,
+struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
+					   const struct sctp_chunk *chunk,
				      const void *payload, const size_t paylen)
{
-	sctp_chunk_t *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
+	struct sctp_chunk *retval;
+
+	retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
	if (!retval)
		goto nodata;

	retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	...
@@ -906,11 +920,12 @@ sctp_chunk_t *sctp_make_heartbeat_ack(const sctp_association_t *asoc,
/* Create an Operation Error chunk with the specified space reserved.
 * This routine can be used for containing multiple causes in the chunk.
 */
-sctp_chunk_t *sctp_make_op_error_space(const sctp_association_t *asoc,
-				       const sctp_chunk_t *chunk,
-				       size_t size)
+struct sctp_chunk *sctp_make_op_error_space(const struct sctp_association *asoc,
+					    const struct sctp_chunk *chunk,
+					    size_t size)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;

	retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
				 sizeof(sctp_errhdr_t) + size);
	...
@@ -933,13 +948,14 @@ sctp_chunk_t *sctp_make_op_error_space(const sctp_association_t *asoc,
}

/* Create an Operation Error chunk. */
-sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
-				 const sctp_chunk_t *chunk,
+struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
+				      const struct sctp_chunk *chunk,
				 __u16 cause_code, const void *payload,
				 size_t paylen)
{
-	sctp_chunk_t *retval = sctp_make_op_error_space(asoc, chunk, paylen);
+	struct sctp_chunk *retval;

+	retval = sctp_make_op_error_space(asoc, chunk, paylen);
	if (!retval)
		goto nodata;
	...
@@ -956,26 +972,27 @@ sctp_chunk_t *sctp_make_op_error(const sctp_association_t *asoc,
/* Turn an skb into a chunk.
 * FIXME: Eventually move the structure directly inside the skb->cb[].
 */
-sctp_chunk_t *sctp_chunkify(struct sk_buff *skb, const sctp_association_t *asoc,
-			    struct sock *sk)
+struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
+				 const struct sctp_association *asoc,
+				 struct sock *sk)
{
-	sctp_chunk_t *retval = t_new(sctp_chunk_t, GFP_ATOMIC);
+	struct sctp_chunk *retval = t_new(struct sctp_chunk, GFP_ATOMIC);

	if (!retval)
		goto nodata;
-	memset(retval, 0, sizeof(sctp_chunk_t));
+	memset(retval, 0, sizeof(struct sctp_chunk));

	if (!sk) {
		SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
	}

	retval->skb		= skb;
-	retval->asoc		= (sctp_association_t *) asoc;
-	retval->num_times_sent	= 0;
+	retval->asoc		= (struct sctp_association *) asoc;
+	retval->resent		= 0;
	retval->has_tsn		= 0;
	retval->has_ssn		= 0;
	retval->rtt_in_progress	= 0;
-	retval->sent_at		= jiffies;
+	retval->sent_at		= 0;
	retval->singleton	= 1;
	retval->end_of_packet	= 0;
	retval->ecn_ce_done	= 0;
	...
@@ -991,17 +1008,24 @@ sctp_chunk_t *sctp_chunkify(struct sk_buff *skb, const sctp_association_t *asoc,
	retval->tsn_gap_acked	= 0;
	retval->fast_retransmit = 0;

+	/* If this is a fragmented message, track all fragments
+	 * of the message (for SEND_FAILED).
+	 */
+	retval->msg = NULL;

	/* Polish the bead hole.  */
	INIT_LIST_HEAD(&retval->transmitted_list);
	INIT_LIST_HEAD(&retval->frag_list);
	SCTP_DBG_OBJCNT_INC(chunk);
+	atomic_set(&retval->refcnt, 1);

nodata:
	return retval;
}

/* Set chunk->source and dest based on the IP header in chunk->skb.  */
-void sctp_init_addrs(sctp_chunk_t *chunk, union sctp_addr *src,
+void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src,
		     union sctp_addr *dest)
{
	memcpy(&chunk->source, src, sizeof(union sctp_addr));
	...
@@ -1009,7 +1033,7 @@ void sctp_init_addrs(sctp_chunk_t *chunk, union sctp_addr *src,
}

/* Extract the source address from a chunk. */
-const union sctp_addr *sctp_source(const sctp_chunk_t *chunk)
+const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
{
	/* If we have a known transport, use that. */
	if (chunk->transport) {
	...
@@ -1023,16 +1047,16 @@ const union sctp_addr *sctp_source(const sctp_chunk_t *chunk)
/* Create a new chunk, setting the type and flags headers from the
 * arguments, reserving enough space for a 'paylen' byte payload.
 */
-sctp_chunk_t *sctp_make_chunk(const sctp_association_t *asoc,
-			      __u8 type, __u8 flags, int paylen)
+struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
+				   __u8 type, __u8 flags, int paylen)
{
-	sctp_chunk_t *retval;
+	struct sctp_chunk *retval;
	sctp_chunkhdr_t *chunk_hdr;
	struct sk_buff *skb;
	struct sock *sk;

	/* No need to allocate LL here, as this is only a chunk. */
	skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen),
			GFP_ATOMIC);
	if (!skb)
		goto nodata;
	...
@@ -1046,7 +1070,7 @@ sctp_chunk_t *sctp_make_chunk(const sctp_association_t *asoc,
	sk = asoc ? asoc->base.sk : NULL;
	retval = sctp_chunkify(skb, asoc, sk);
	if (!retval) {
-		dev_kfree_skb(skb);
+		kfree_skb(skb);
		goto nodata;
	}
	...
@@ -1062,12 +1086,10 @@ sctp_chunk_t *sctp_make_chunk(const sctp_association_t *asoc,
	return NULL;
}

-/* Release the memory occupied by a chunk. */
-void sctp_free_chunk(sctp_chunk_t *chunk)
+static void sctp_chunk_destroy(struct sctp_chunk *chunk)
{
	/* Make sure that we are not on any list.  */
	skb_unlink((struct sk_buff *) chunk);
	list_del(&chunk->transmitted_list);

	/* Free the chunk skb data and the SCTP_chunk stub itself. */
	dev_kfree_skb(chunk->skb);
	...
@@ -1076,11 +1098,37 @@ void sctp_free_chunk(sctp_chunk_t *chunk)
	SCTP_DBG_OBJCNT_DEC(chunk);
}

+/* Possibly, free the chunk.  */
+void sctp_chunk_free(struct sctp_chunk *chunk)
+{
+	/* Make sure that we are not on any list.  */
+	skb_unlink((struct sk_buff *) chunk);
+	list_del(&chunk->transmitted_list);
+
+	/* Release our reference on the message tracker. */
+	if (chunk->msg)
+		sctp_datamsg_put(chunk->msg);
+
+	sctp_chunk_put(chunk);
+}
+
+/* Grab a reference to the chunk. */
+void sctp_chunk_hold(struct sctp_chunk *ch)
+{
+	atomic_inc(&ch->refcnt);
+}
+
+/* Release a reference to the chunk. */
+void sctp_chunk_put(struct sctp_chunk *ch)
+{
+	if (atomic_dec_and_test(&ch->refcnt))
+		sctp_chunk_destroy(ch);
+}
+
/* Append bytes to the end of a chunk.  Will panic if chunk is not big
 * enough.
 */
-void *sctp_addto_chunk(sctp_chunk_t *chunk, int len, const void *data)
+void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
{
	void *target;
	void *padding;
	...
@@ -1104,8 +1152,8 @@ void *sctp_addto_chunk(sctp_chunk_t *chunk, int len, const void *data)
 * chunk is not big enough.
 * Returns a kernel err value.
 */
-static int sctp_user_addto_chunk(sctp_chunk_t *chunk, int off, int len,
-				 struct iovec *data)
+int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
+			  struct iovec *data)
{
	__u8 *target;
	int err = 0;
	...
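A note on the new sctp_chunk_hold()/sctp_chunk_put() pair above: chunk lifetime becomes reference counted, with the chunk created holding one reference (the atomic_set in sctp_chunkify) and the final put invoking sctp_chunk_destroy(). The sketch below shows the same get/put pattern in user space with C11 atomics; the structure, field names and destroy hook are invented for illustration and are not the kernel types.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
            atomic_int refcnt;
            char payload[32];
    };

    static struct chunk *chunk_new(void)
    {
            struct chunk *ch = calloc(1, sizeof(*ch));

            if (ch)
                    atomic_store(&ch->refcnt, 1);   /* creator owns one reference */
            return ch;
    }

    static void chunk_destroy(struct chunk *ch)
    {
            printf("destroying chunk %p\n", (void *)ch);
            free(ch);
    }

    static void chunk_hold(struct chunk *ch)
    {
            atomic_fetch_add(&ch->refcnt, 1);
    }

    static void chunk_put(struct chunk *ch)
    {
            /* fetch_sub returns the old value; 1 means we were the last holder */
            if (atomic_fetch_sub(&ch->refcnt, 1) == 1)
                    chunk_destroy(ch);
    }

    int main(void)
    {
            struct chunk *ch = chunk_new();

            chunk_hold(ch);         /* e.g. a transmit queue keeps its own reference */
            chunk_put(ch);          /* the queue drops it after the packet is built */
            chunk_put(ch);          /* creator's reference: the last put destroys */
            return 0;
    }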
@@ -1126,132 +1174,10 @@ static int sctp_user_addto_chunk(sctp_chunk_t *chunk, int off, int len,
	return err;
}

-/* A data chunk can have a maximum payload of (2^16 - 20).  Break
- * down any such message into smaller chunks.  Opportunistically, fragment
- * the chunks down to the current MTU constraints.  We may get refragmented
- * later if the PMTU changes, but it is _much better_ to fragment immediately
- * with a reasonable guess than always doing our fragmentation on the
- * soft-interrupt.
- */
-int sctp_datachunks_from_user(sctp_association_t *asoc,
-			      const struct sctp_sndrcvinfo *sinfo,
-			      struct msghdr *msg, int msg_len,
-			      struct sk_buff_head *chunks)
-{
-	int max, whole, i, offset, over, err;
-	int len, first_len;
-	sctp_chunk_t *chunk;
-	__u8 frag;
-
-	/* What is a reasonable fragmentation point right now? */
-	max = asoc->pmtu;
-	if (max < SCTP_MIN_PMTU)
-		max = SCTP_MIN_PMTU;
-	max -= SCTP_IP_OVERHEAD;
-
-	/* Make sure not beyond maximum chunk size. */
-	if (max > SCTP_MAX_CHUNK_LEN)
-		max = SCTP_MAX_CHUNK_LEN;
-
-	/* Subtract out the overhead of a data chunk header. */
-	max -= sizeof(struct sctp_data_chunk);
-
-	whole = 0;
-	first_len = max;
-
-	/* Encourage Cookie-ECHO bundling. */
-	if (asoc->state < SCTP_STATE_COOKIE_ECHOED) {
-		whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);
-
-		/* Account for the DATA to be bundled with the COOKIE-ECHO. */
-		if (whole) {
-			first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN;
-			msg_len -= first_len;
-			whole = 1;
-		}
-	}
-
-	/* How many full sized?  How many bytes leftover? */
-	whole += msg_len / max;
-	over = msg_len % max;
-	offset = 0;
-
-	if (whole && over)
-		SCTP_INC_STATS_USER(SctpFragUsrMsgs);
-
-	/* Create chunks for all the full sized DATA chunks. */
-	for (i = 0, len = first_len; i < whole; i++) {
-		frag = SCTP_DATA_MIDDLE_FRAG;
-
-		if (0 == i)
-			frag |= SCTP_DATA_FIRST_FRAG;
-
-		if ((i == (whole - 1)) && !over)
-			frag |= SCTP_DATA_LAST_FRAG;
-
-		chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
-
-		if (!chunk)
-			goto nomem;
-		err = sctp_user_addto_chunk(chunk, offset, len, msg->msg_iov);
-		if (err < 0)
-			goto errout;
-
-		offset += len;
-
-		/* Put the chunk->skb back into the form expected by send. */
-		__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
-			   - (__u8 *)chunk->skb->data);
-
-		__skb_queue_tail(chunks, (struct sk_buff *)chunk);
-
-		/* The first chunk, the first chunk was likely short
-		 * to allow bundling, so reset to full size.
-		 */
-		if (0 == i)
-			len = max;
-	}
-
-	/* .. now the leftover bytes. */
-	if (over) {
-		if (!whole)
-			frag = SCTP_DATA_NOT_FRAG;
-		else
-			frag = SCTP_DATA_LAST_FRAG;
-
-		chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
-
-		if (!chunk)
-			goto nomem;
-
-		err = sctp_user_addto_chunk(chunk, offset, over, msg->msg_iov);
-
-		/* Put the chunk->skb back into the form expected by send. */
-		__skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
-			   - (__u8 *)chunk->skb->data);
-		if (err < 0)
-			goto errout;
-
-		__skb_queue_tail(chunks, (struct sk_buff *)chunk);
-	}
-
-	err = 0;
-	goto out;
-
-nomem:
-	err = -ENOMEM;
-errout:
-	while ((chunk = (sctp_chunk_t *)__skb_dequeue(chunks)))
-		sctp_free_chunk(chunk);
-out:
-	return err;
-}
-
/* Helper function to assign a TSN if needed.  This assumes that both
 * the data_hdr and association have already been assigned.
 */
-void sctp_chunk_assign_ssn(sctp_chunk_t *chunk)
+void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
	__u16 ssn;
	__u16 sid;
	...
@@ -1278,7 +1204,7 @@ void sctp_chunk_assign_ssn(sctp_chunk_t *chunk)
/* Helper function to assign a TSN if needed.  This assumes that both
 * the data_hdr and association have already been assigned.
 */
-void sctp_chunk_assign_tsn(sctp_chunk_t *chunk)
+void sctp_chunk_assign_tsn(struct sctp_chunk *chunk)
{
	if (!chunk->has_tsn) {
		/* This is the last possible instant to
	...
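The large block removed above is the user-message fragmentation routine; in this merge its successor lives in the new net/sctp/chunk.c (listed in the file summary at the top of the commit). The arithmetic it performs is worth seeing in isolation: split msg_len into "whole" full-size pieces plus an "over" remainder, and shrink only the first piece when a COOKIE-ECHO still has to be bundled into the same packet. The sketch below is a user-space rendering of that arithmetic only; the overhead constants and names are illustrative, not the kernel's values.

    #include <stdio.h>

    #define IP_OVERHEAD          48     /* assumed IP + SCTP header overhead */
    #define DATA_CHUNK_HDR       16
    #define COOKIE_ECHO_RESERVE 256     /* room kept for a bundled COOKIE-ECHO */

    static void plan_fragments(int pmtu, int msg_len, int bundle_cookie_echo)
    {
            int max = pmtu - IP_OVERHEAD - DATA_CHUNK_HDR;
            int first_len = max, whole = 0, over, i, len;

            /* If a COOKIE-ECHO must ride along, shorten only the first piece. */
            if (bundle_cookie_echo && msg_len / (max - COOKIE_ECHO_RESERVE)) {
                    first_len = max - COOKIE_ECHO_RESERVE;
                    msg_len -= first_len;
                    whole = 1;
            }
            whole += msg_len / max;
            over = msg_len % max;

            for (i = 0, len = first_len; i < whole; i++) {
                    printf("fragment %d: %d bytes%s%s\n", i, len,
                           i == 0 ? " [FIRST]" : "",
                           (i == whole - 1 && !over) ? " [LAST]" : "");
                    if (i == 0)
                            len = max;      /* only the first piece is shortened */
            }
            if (over)
                    printf("fragment %d: %d bytes %s\n", whole, over,
                           whole == 0 ? "[NOT FRAGMENTED]" : "[LAST]");
    }

    int main(void)
    {
            plan_fragments(1500, 4000, 1);  /* message queued before COOKIE-ECHOED */
            return 0;
    }

With a 1500-byte PMTU this plans pieces of 1180, 1436 and 1384 bytes, which add back up to the original 4000-byte message.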
@@ -1291,10 +1217,10 @@ void sctp_chunk_assign_tsn(sctp_chunk_t *chunk)
}

/* Create a CLOSED association to use with an incoming packet. */
-sctp_association_t *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
-					struct sctp_chunk *chunk, int gfp)
+struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
+					     struct sctp_chunk *chunk, int gfp)
{
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
	struct sk_buff *skb;
	sctp_scope_t scope;
	...
@@ -1303,6 +1229,7 @@ sctp_association_t *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
	if (!asoc)
		goto nodata;
+	asoc->temp = 1;
	skb = chunk->skb;

	/* Create an entry for the source address of the packet.  */
	/* FIXME: Use the af specific helpers. */
	...
@@ -1339,15 +1266,18 @@ sctp_association_t *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
/* Build a cookie representing asoc.
 * This INCLUDES the param header needed to put the cookie in the INIT ACK.
 */
-sctp_cookie_param_t *sctp_pack_cookie(const sctp_endpoint_t *ep,
-				      const sctp_association_t *asoc,
-				      const sctp_chunk_t *init_chunk,
+sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
+				      const struct sctp_association *asoc,
+				      const struct sctp_chunk *init_chunk,
				      int *cookie_len,
				      const __u8 *raw_addrs, int addrs_len)
{
	sctp_cookie_param_t *retval;
	sctp_signed_cookie_t *cookie;
+	struct scatterlist sg;
	int headersize, bodysize;
+	unsigned int keylen;
+	char *key;

	headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE;
	bodysize = sizeof(sctp_cookie_t)
	...
@@ -1361,8 +1291,8 @@ sctp_cookie_param_t *sctp_pack_cookie(const sctp_endpoint_t *ep,
			- (bodysize % SCTP_COOKIE_MULTIPLE);
	*cookie_len = headersize + bodysize;

	retval = (sctp_cookie_param_t *)kmalloc(*cookie_len, GFP_ATOMIC);
	if (!retval) {
		*cookie_len = 0;
		goto nodata;
	...
@@ -1393,30 +1323,39 @@ sctp_cookie_param_t *sctp_pack_cookie(const sctp_endpoint_t *ep,
	/* Copy the raw local address list of the association. */
	memcpy((__u8 *)&cookie->c.peer_init[0] +
	       ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);

-	/* Sign the message. */
-	sctp_hash_digest(ep->secret_key[ep->current_key], SCTP_SECRET_SIZE,
-			 (__u8 *)&cookie->c, bodysize, cookie->signature);
+	if (sctp_sk(ep->base.sk)->hmac) {
+		/* Sign the message. */
+		sg.page = virt_to_page(&cookie->c);
+		sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
+		sg.length = bodysize;
+		keylen = SCTP_SECRET_SIZE;
+		key = (char *)ep->secret_key[ep->current_key];
+
+		sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
+				 &sg, 1, cookie->signature);
+	}

nodata:
	return retval;
}

/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
-sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
-				       const sctp_association_t *asoc,
-				       sctp_chunk_t *chunk, int gfp,
-				       int *error, sctp_chunk_t **err_chk_p)
+struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *ep,
+					    const struct sctp_association *asoc,
+					    struct sctp_chunk *chunk, int gfp,
+					    int *error, struct sctp_chunk **errp)
{
-	sctp_association_t *retval = NULL;
+	struct sctp_association *retval = NULL;
	sctp_signed_cookie_t *cookie;
	sctp_cookie_t *bear_cookie;
-	int headersize, bodysize;
-	int fixed_size;
-	__u8 digest_buf[SCTP_SIGNATURE_SIZE];
-	int secret;
+	int headersize, bodysize, fixed_size;
+	__u8 digest[SCTP_SIGNATURE_SIZE];
+	struct scatterlist sg;
+	unsigned int keylen;
+	char *key;
	sctp_scope_t scope;
	struct sk_buff *skb = chunk->skb;
	...
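Background for the hunk above: the state cookie is signed with a keyed digest over the cookie body using the endpoint's current secret, so that a later COOKIE ECHO can be validated without keeping per-association state; this merge routes the digest through the kernel crypto API (sctp_crypto_hmac over a scatterlist) instead of the old sctp_hash_digest helper. Below is a user-space analogue of the signing step using OpenSSL's HMAC(); the cookie layout and sizes are illustrative, and this is not the kernel code path. Build with -lcrypto.

    #include <stdio.h>
    #include <string.h>
    #include <openssl/evp.h>
    #include <openssl/hmac.h>

    #define SECRET_SIZE     32
    #define SIGNATURE_SIZE  20      /* HMAC-SHA1 output */

    struct signed_cookie {
            unsigned char signature[SIGNATURE_SIZE];
            unsigned char body[128];        /* association state echoed back later */
    };

    static void sign_cookie(struct signed_cookie *c, const unsigned char *secret)
    {
            unsigned int len = 0;

            /* Keyed digest over the cookie body only; the signature field is
             * excluded so the peer cannot influence it.
             */
            HMAC(EVP_sha1(), secret, SECRET_SIZE,
                 c->body, sizeof(c->body), c->signature, &len);
    }

    int main(void)
    {
            unsigned char secret[SECRET_SIZE] = "endpoint-secret-key-example";
            struct signed_cookie c;
            unsigned int i;

            memset(&c, 0, sizeof(c));
            strcpy((char *)c.body, "vtag=0x1234 peer=192.0.2.1 tsn=42");
            sign_cookie(&c, secret);

            printf("cookie signature: ");
            for (i = 0; i < SIGNATURE_SIZE; i++)
                    printf("%02x", c.signature[i]);
            printf("\n");
            return 0;
    }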
@@ -1440,23 +1379,35 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
	cookie = chunk->subh.cookie_hdr;
	bear_cookie = &cookie->c;

+	if (!sctp_sk(ep->base.sk)->hmac)
+		goto no_hmac;

	/* Check the signature.  */
-	secret = ep->current_key;
-	sctp_hash_digest(ep->secret_key[secret], SCTP_SECRET_SIZE,
-			 (__u8 *)bear_cookie, bodysize, digest_buf);
-	if (memcmp(digest_buf, cookie->signature, SCTP_SIGNATURE_SIZE)) {
+	keylen = SCTP_SECRET_SIZE;
+	sg.page = virt_to_page(bear_cookie);
+	sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
+	sg.length = bodysize;
+	key = (char *)ep->secret_key[ep->current_key];
+
+	memset(digest, 0x00, sizeof(digest));
+	sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg,
+			 1, digest);
+
+	if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
		/* Try the previous key. */
-		secret = ep->last_key;
-		sctp_hash_digest(ep->secret_key[secret], SCTP_SECRET_SIZE,
-				 (__u8 *)bear_cookie, bodysize, digest_buf);
-		if (memcmp(digest_buf, cookie->signature, SCTP_SIGNATURE_SIZE)) {
+		key = (char *)ep->secret_key[ep->last_key];
+		memset(digest, 0x00, sizeof(digest));
+		sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
+				 &sg, 1, digest);
+
+		if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
			/* Yikes!  Still bad signature! */
			*error = -SCTP_IERROR_BAD_SIG;
			goto fail;
		}
	}

+no_hmac:
	/* Check to see if the cookie is stale.  If there is already
	 * an association, there is no need to check cookie's expiration
	 * for init collision case of lost COOKIE ACK.
	...
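Worth calling out in the verification above: the receiver recomputes the HMAC with its current secret and, on mismatch, retries with the previous secret so cookies issued just before a key rollover still validate; only when both digests fail is the COOKIE ECHO rejected with a bad-signature error. The sketch below shows that fallback pattern in user space, again using OpenSSL as a stand-in for the kernel crypto API; key handling and sizes are illustrative. Build with -lcrypto.

    #include <stdio.h>
    #include <string.h>
    #include <openssl/evp.h>
    #include <openssl/hmac.h>

    #define SECRET_SIZE     32
    #define SIGNATURE_SIZE  20

    static void hmac_sign(const unsigned char *secret, const unsigned char *body,
                          size_t len, unsigned char *out)
    {
            unsigned int outlen = 0;

            HMAC(EVP_sha1(), secret, SECRET_SIZE, body, len, out, &outlen);
    }

    /* Returns 1 if the signature matches either the current or the last key. */
    static int verify_cookie(const unsigned char *body, size_t len,
                             const unsigned char *signature,
                             const unsigned char *current_key,
                             const unsigned char *last_key)
    {
            unsigned char digest[SIGNATURE_SIZE];

            hmac_sign(current_key, body, len, digest);
            if (!memcmp(digest, signature, SIGNATURE_SIZE))
                    return 1;

            /* Try the previous key: the cookie may predate a key rollover. */
            hmac_sign(last_key, body, len, digest);
            return !memcmp(digest, signature, SIGNATURE_SIZE);
    }

    int main(void)
    {
            unsigned char old_key[SECRET_SIZE] = "previous-endpoint-secret";
            unsigned char new_key[SECRET_SIZE] = "current-endpoint-secret";
            unsigned char body[64] = "cookie body issued before rollover";
            unsigned char sig[SIGNATURE_SIZE];

            hmac_sign(old_key, body, sizeof(body), sig);    /* signed with the old key */
            printf("verify: %s\n",
                   verify_cookie(body, sizeof(body), sig, new_key, old_key) ?
                   "accepted (matched previous key)" : "rejected");
            return 0;
    }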
@@ -1472,15 +1423,15 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
	 * Cookie that has expired.
	 */
	len = ntohs(chunk->chunk_hdr->length);
-	*err_chk_p = sctp_make_op_error_space(asoc, chunk, len);
-	if (*err_chk_p) {
+	*errp = sctp_make_op_error_space(asoc, chunk, len);
+	if (*errp) {
		suseconds_t usecs = (skb->stamp.tv_sec -
				     bear_cookie->expiration.tv_sec) * 1000000L +
			skb->stamp.tv_usec - bear_cookie->expiration.tv_usec;

		usecs = htonl(usecs);
-		sctp_init_cause(*err_chk_p, SCTP_ERROR_STALE_COOKIE,
+		sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
				&usecs, sizeof(usecs));
		*error = -SCTP_IERROR_STALE_COOKIE;
	} else
	...
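The Stale Cookie error built above carries the "measure of staleness": the number of microseconds by which the arriving COOKIE ECHO missed the cookie's expiration time, sent in network byte order. A tiny user-space rendering of that arithmetic, with helper names of my own choosing:

    #include <stdio.h>
    #include <sys/time.h>
    #include <arpa/inet.h>

    /* Microseconds elapsed between the cookie's expiration and packet arrival. */
    static long staleness_usecs(struct timeval arrival, struct timeval expiration)
    {
            return (arrival.tv_sec - expiration.tv_sec) * 1000000L +
                   (arrival.tv_usec - expiration.tv_usec);
    }

    int main(void)
    {
            struct timeval expiration = { .tv_sec = 1000, .tv_usec = 250000 };
            struct timeval arrival    = { .tv_sec = 1003, .tv_usec = 100000 };
            long usecs = staleness_usecs(arrival, expiration);

            /* The error cause carries the value in network byte order. */
            printf("stale by %ld usecs (wire: 0x%08x)\n", usecs,
                   htonl((unsigned int)usecs));
            return 0;
    }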
@@ -1509,6 +1460,12 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
		goto fail;
	}

+	/* Also, add the destination address. */
+	if (list_empty(&retval->base.bind_addr.address_list)) {
+		sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
+				   GFP_ATOMIC);
+	}
+
	retval->next_tsn = retval->c.initial_tsn;
	retval->ctsn_ack_point = retval->next_tsn - 1;
	...
@@ -1541,10 +1498,10 @@ struct __sctp_missing {
/*
 * Report a missing mandatory parameter.
 */
-static int sctp_process_missing_param(const sctp_association_t *asoc,
+static int sctp_process_missing_param(const struct sctp_association *asoc,
				      sctp_param_t paramtype,
-				      sctp_chunk_t *chunk,
-				      sctp_chunk_t **err_chk_p)
+				      struct sctp_chunk *chunk,
+				      struct sctp_chunk **errp)
{
	struct __sctp_missing report;
	__u16 len;
	...
@@ -1554,13 +1511,13 @@ static int sctp_process_missing_param(const sctp_association_t *asoc,
	/* Make an ERROR chunk, preparing enough room for
	 * returning multiple unknown parameters.
	 */
-	if (!*err_chk_p)
-		*err_chk_p = sctp_make_op_error_space(asoc, chunk, len);
+	if (!*errp)
+		*errp = sctp_make_op_error_space(asoc, chunk, len);

-	if (*err_chk_p) {
+	if (*errp) {
		report.num_missing = htonl(1);
		report.type = paramtype;
-		sctp_init_cause(*err_chk_p, SCTP_ERROR_INV_PARAM,
+		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM,
				&report, sizeof(report));
	}
	...
@@ -1569,17 +1526,17 @@ static int sctp_process_missing_param(const sctp_association_t *asoc,
}

/* Report an Invalid Mandatory Parameter.  */
-static int sctp_process_inv_mandatory(const sctp_association_t *asoc,
-				      sctp_chunk_t *chunk,
-				      sctp_chunk_t **err_chk_p)
+static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
+				      struct sctp_chunk *chunk,
+				      struct sctp_chunk **errp)
{
	/* Invalid Mandatory Parameter Error has no payload. */

-	if (!*err_chk_p)
-		*err_chk_p = sctp_make_op_error_space(asoc, chunk, 0);
+	if (!*errp)
+		*errp = sctp_make_op_error_space(asoc, chunk, 0);

-	if (*err_chk_p)
-		sctp_init_cause(*err_chk_p, SCTP_ERROR_INV_PARAM, NULL, 0);
+	if (*errp)
+		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);

	/* Stop processing this chunk. */
	return 0;
	...
@@ -1588,19 +1545,19 @@ static int sctp_process_inv_mandatory(const sctp_association_t *asoc,
/* Do not attempt to handle the HOST_NAME parm.  However, do
 * send back an indicator to the peer.
 */
-static int sctp_process_hn_param(const sctp_association_t *asoc,
+static int sctp_process_hn_param(const struct sctp_association *asoc,
				 union sctp_params param,
-				 sctp_chunk_t *chunk,
-				 sctp_chunk_t **err_chk_p)
+				 struct sctp_chunk *chunk,
+				 struct sctp_chunk **errp)
{
	__u16 len = ntohs(param.p->length);

	/* Make an ERROR chunk. */
-	if (!*err_chk_p)
-		*err_chk_p = sctp_make_op_error_space(asoc, chunk, len);
+	if (!*errp)
+		*errp = sctp_make_op_error_space(asoc, chunk, len);

-	if (*err_chk_p)
-		sctp_init_cause(*err_chk_p, SCTP_ERROR_DNS_FAILED,
+	if (*errp)
+		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
				param.v, len);

	/* Stop processing this chunk. */
	...
@@ -1633,10 +1590,10 @@ static int sctp_process_hn_param(const sctp_association_t *asoc,
 *	0 - discard the chunk
 *	1 - continue with the chunk
 */
-static int sctp_process_unk_param(const sctp_association_t *asoc,
+static int sctp_process_unk_param(const struct sctp_association *asoc,
				  union sctp_params param,
-				  sctp_chunk_t *chunk,
-				  sctp_chunk_t **err_chk_p)
+				  struct sctp_chunk *chunk,
+				  struct sctp_chunk **errp)
{
	int retval = 1;
	...
@@ -1649,12 +1606,12 @@ static int sctp_process_unk_param(const sctp_association_t *asoc,
		/* Make an ERROR chunk, preparing enough room for
		 * returning multiple unknown parameters.
		 */
-		if (NULL == *err_chk_p)
-			*err_chk_p = sctp_make_op_error_space(asoc, chunk,
+		if (NULL == *errp)
+			*errp = sctp_make_op_error_space(asoc, chunk,
					ntohs(chunk->chunk_hdr->length));

-		if (*err_chk_p)
-			sctp_init_cause(*err_chk_p, SCTP_ERROR_UNKNOWN_PARAM,
+		if (*errp)
+			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
					param.v,
					WORD_ROUND(ntohs(param.p->length)));
	...
@@ -1665,12 +1622,12 @@ static int sctp_process_unk_param(const sctp_association_t *asoc,
		/* Make an ERROR chunk, preparing enough room for
		 * returning multiple unknown parameters.
		 */
-		if (NULL == *err_chk_p)
-			*err_chk_p = sctp_make_op_error_space(asoc, chunk,
+		if (NULL == *errp)
+			*errp = sctp_make_op_error_space(asoc, chunk,
					ntohs(chunk->chunk_hdr->length));

-		if (*err_chk_p) {
-			sctp_init_cause(*err_chk_p, SCTP_ERROR_UNKNOWN_PARAM,
+		if (*errp) {
+			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
					param.v,
					WORD_ROUND(ntohs(param.p->length)));
		} else {
	...
@@ -1695,11 +1652,11 @@ static int sctp_process_unk_param(const sctp_association_t *asoc,
 *	0 - discard the chunk
 *	1 - continue with the chunk
 */
-static int sctp_verify_param(const sctp_association_t *asoc,
+static int sctp_verify_param(const struct sctp_association *asoc,
			     union sctp_params param,
			     sctp_cid_t cid,
-			     sctp_chunk_t *chunk,
-			     sctp_chunk_t **err_chunk)
+			     struct sctp_chunk *chunk,
+			     struct sctp_chunk **err_chunk)
{
	int retval = 1;
	...
@@ -1733,11 +1690,11 @@ static int sctp_verify_param(const sctp_association_t *asoc,
}

/* Verify the INIT packet before we process it.  */
-int sctp_verify_init(const sctp_association_t *asoc,
+int sctp_verify_init(const struct sctp_association *asoc,
		     sctp_cid_t cid,
		     sctp_init_chunk_t *peer_init,
-		     sctp_chunk_t *chunk,
-		     sctp_chunk_t **err_chk_p)
+		     struct sctp_chunk *chunk,
+		     struct sctp_chunk **errp)
{
	union sctp_params param;
	int has_cookie = 0;
	...
@@ -1746,7 +1703,7 @@ int sctp_verify_init(const sctp_association_t *asoc,
	if ((0 == peer_init->init_hdr.num_outbound_streams) ||
	    (0 == peer_init->init_hdr.num_inbound_streams)) {

-		sctp_process_inv_mandatory(asoc, chunk, err_chk_p);
+		sctp_process_inv_mandatory(asoc, chunk, errp);
		return 0;
	}
	...
@@ -1762,9 +1719,8 @@ int sctp_verify_init(const sctp_association_t *asoc,
	 * the state cookie for an INIT-ACK chunk.
	 */
	if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) {
		sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
-					   chunk, err_chk_p);
+					   chunk, errp);
		return 0;
	}
	...
@@ -1772,7 +1728,7 @@ int sctp_verify_init(const sctp_association_t *asoc,
	sctp_walk_params(param, peer_init, init_hdr.params) {

-		if (!sctp_verify_param(asoc, param, cid, chunk, err_chk_p))
+		if (!sctp_verify_param(asoc, param, cid, chunk, errp))
			return 0;

	} /* for (loop through all parameters) */
	...
@@ -1784,7 +1740,7 @@ int sctp_verify_init(const sctp_association_t *asoc,
 * Returns 0 on failure, else success.
 * FIXME:  This is an association method.
 */
-int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
+int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
		      const union sctp_addr *peer_addr,
		      sctp_init_chunk_t *peer_init, int gfp)
{
	...
@@ -1923,7 +1879,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
 * work we do.  In particular, we should not build transport
 * structures for the addresses.
 */
-int sctp_process_param(sctp_association_t *asoc, union sctp_params param,
+int sctp_process_param(struct sctp_association *asoc, union sctp_params param,
		       const union sctp_addr *peer_addr, int gfp)
{
	union sctp_addr addr;
	...
@@ -2032,7 +1988,7 @@ int sctp_process_param(sctp_association_t *asoc, union sctp_params param,
}

/* Select a new verification tag.  */
-__u32 sctp_generate_tag(const sctp_endpoint_t *ep)
+__u32 sctp_generate_tag(const struct sctp_endpoint *ep)
{
	/* I believe that this random number generator complies with RFC1750.
	 * A tag of 0 is reserved for special cases (e.g. INIT).
	...
@@ -2047,7 +2003,7 @@ __u32 sctp_generate_tag(const sctp_endpoint_t *ep)
}

/* Select an initial TSN to send during startup.  */
-__u32 sctp_generate_tsn(const sctp_endpoint_t *ep)
+__u32 sctp_generate_tsn(const struct sctp_endpoint *ep)
{
	__u32 retval;
	...
net/sctp/sm_sideeffect.c  View file @ 3e446c25
	...
@@ -60,7 +60,8 @@
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
-static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
+static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
+				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */
	...
@@ -80,11 +81,11 @@ static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
-static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
+static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
					   __u32 lowest_tsn,
-					   sctp_chunk_t *chunk)
+					   struct sctp_chunk *chunk)
{
-	sctp_chunk_t *repl;
+	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	...
@@ -123,7 +124,7 @@ static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
-static void sctp_do_ecn_cwr_work(sctp_association_t *asoc,
+static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	...
@@ -200,7 +201,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
-	sctp_association_t *asoc = transport->asoc;
+	struct sctp_association *asoc = transport->asoc;

	/* Check whether a task is in the sock.  */
	...
@@ -238,7 +239,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
/* This is a sa interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
-static void sctp_generate_timeout_event(sctp_association_t *asoc,
+static void sctp_generate_timeout_event(struct sctp_association *asoc,
					sctp_event_timeout_t timeout_type)
{
	int error = 0;
	...
@@ -265,8 +266,7 @@ static void sctp_generate_timeout_event(sctp_association_t *asoc,
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;
	...
@@ -278,25 +278,25 @@ static void sctp_generate_timeout_event(sctp_association_t *asoc,
void sctp_generate_t1_cookie_event(unsigned long data)
{
-	sctp_association_t *asoc = (sctp_association_t *) data;
+	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

void sctp_generate_t1_init_event(unsigned long data)
{
-	sctp_association_t *asoc = (sctp_association_t *) data;
+	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

void sctp_generate_t2_shutdown_event(unsigned long data)
{
-	sctp_association_t *asoc = (sctp_association_t *) data;
+	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
-	sctp_association_t *asoc = (sctp_association_t *) data;
+	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
	...
@@ -304,7 +304,7 @@ void sctp_generate_t5_shutdown_guard_event(unsigned long data)
void sctp_generate_autoclose_event(unsigned long data)
{
-	sctp_association_t *asoc = (sctp_association_t *) data;
+	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}
	...
@@ -315,7 +315,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
-	sctp_association_t *asoc = transport->asoc;
+	struct sctp_association *asoc = transport->asoc;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
	...
@@ -335,8 +335,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
	...
@@ -350,7 +349,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
/* Inject a SACK Timeout event into the state machine.  */
void sctp_generate_sack_event(unsigned long data)
{
-	sctp_association_t *asoc = (sctp_association_t *) data;
+	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}
	...
@@ -382,7 +381,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
 * notification SHOULD be sent to the upper layer.
 *
 */
-static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
+static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
					 struct sctp_transport *transport)
{
	/* The check for association's overall error counter exceeding the
	...
@@ -410,7 +409,7 @@ static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
-				 sctp_association_t *asoc,
+				 struct sctp_association *asoc,
				 unsigned error)
{
	struct sctp_ulpevent *event;
	...
@@ -483,7 +482,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
-				     sctp_association_t *asoc)
+				     struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;
	...
@@ -501,7 +500,7 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
-				    sctp_association_t *asoc)
+				    struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;
	...
@@ -517,7 +516,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     sctp_association_t *asoc,
+				     struct sctp_association *asoc,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer.  */
	...
@@ -527,9 +526,9 @@ static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
/* Helper function to handle the reception of an HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
-				  sctp_association_t *asoc,
+				  struct sctp_association *asoc,
				  struct sctp_transport *t,
-				  sctp_chunk_t *chunk)
+				  struct sctp_chunk *chunk)
{
	sctp_sender_hb_info_t *hbinfo;
	...
@@ -560,7 +559,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 * timer.
 */
static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
-				     sctp_association_t *asoc,
+				     struct sctp_association *asoc,
				     struct sctp_transport *t)
{
	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
	...
@@ -571,7 +570,7 @@ static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
/* Helper function to process the process SACK command.  */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
-				 sctp_association_t *asoc,
+				 struct sctp_association *asoc,
				 sctp_sackhdr_t *sackh)
{
	int err;
	...
@@ -595,8 +594,9 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
-static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
-			      sctp_chunk_t *chunk)
+static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
+			      struct sctp_association *asoc,
+			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;
	...
@@ -607,18 +607,30 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
}

/* Helper function to change the state of an association. */
-static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
+static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, struct sctp_association *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;
-	struct sctp_opt *sp = sctp_sk(sk);

	asoc->state = state;
	asoc->state_timestamp = jiffies;

-	if ((SCTP_STATE_ESTABLISHED == asoc->state) ||
-	    (SCTP_STATE_CLOSED == asoc->state)) {
+	if (sctp_style(sk, TCP)) {
+		/* Change the sk->state of a TCP-style socket that has
+		 * sucessfully completed a connect() call.
+		 */
+		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
+			sk->state = SCTP_SS_ESTABLISHED;
+
+		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
+		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
+		    sctp_sstate(sk, ESTABLISHED))
+			sk->shutdown |= RCV_SHUTDOWN;
+	}
+
+	if (sctp_state(asoc, ESTABLISHED) || sctp_state(asoc, CLOSED) ||
+	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
	...
@@ -631,16 +643,26 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
-		if (SCTP_SOCKET_UDP != sp->type)
+		if (!sctp_style(sk, UDP))
			sk->state_change(sk);
	}
}

-	/* Change the sk->state of a TCP-style socket that has successfully
-	 * completed a connect() call.
-	 */
-	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
-	    (SCTP_SOCKET_TCP == sp->type) && (SCTP_SS_CLOSED == sk->state))
-		sk->state = SCTP_SS_ESTABLISHED;
+/* Helper function to delete an association. */
+static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
+				struct sctp_association *asoc)
+{
+	struct sock *sk = asoc->base.sk;
+
+	/* If it is a non-temporary association belonging to a TCP-style
+	 * listening socket, do not free it so that accept() can pick it
+	 * up later.
+	 */
+	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && (!asoc->temp))
+		return;
+
+	sctp_unhash_established(asoc);
+	sctp_association_free(asoc);
+}

/* These three macros allow us to pull the debugging code out of the
	...
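The sctp_cmd_new_state() rework above makes the association state drive the TCP-style socket's own bookkeeping: reaching ESTABLISHED lets a blocked connect() complete, and a received SHUTDOWN marks the socket half-closed for readers. A toy model of that predicate logic is below; the enum values and field names are invented for illustration and do not correspond to the kernel structures.

    #include <stdio.h>

    enum asoc_state { ASOC_COOKIE_WAIT, ASOC_ESTABLISHED,
                      ASOC_SHUTDOWN_RECEIVED, ASOC_CLOSED };
    enum sock_state { SS_CLOSED, SS_LISTENING, SS_ESTABLISHED };

    struct toy_sock {
            int tcp_style;          /* one-to-one style socket */
            enum sock_state state;
            int rcv_shutdown;       /* peer has sent SHUTDOWN */
    };

    static void asoc_new_state(struct toy_sock *sk, enum asoc_state state)
    {
            if (!sk->tcp_style)
                    return;
            /* A completed handshake makes connect() observable. */
            if (state == ASOC_ESTABLISHED && sk->state == SS_CLOSED)
                    sk->state = SS_ESTABLISHED;
            /* A received SHUTDOWN is a half-close from the peer. */
            if (state == ASOC_SHUTDOWN_RECEIVED && sk->state == SS_ESTABLISHED)
                    sk->rcv_shutdown = 1;
    }

    int main(void)
    {
            struct toy_sock sk = { .tcp_style = 1, .state = SS_CLOSED };

            asoc_new_state(&sk, ASOC_ESTABLISHED);
            asoc_new_state(&sk, ASOC_SHUTDOWN_RECEIVED);
            printf("sock state=%d rcv_shutdown=%d\n", sk.state, sk.rcv_shutdown);
            return 0;
    }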
@@ -673,10 +695,10 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
 */
int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state,
-	       sctp_endpoint_t *ep, sctp_association_t *asoc,
+	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg,
-	       int priority)
+	       int gfp)
{
	sctp_cmd_seq_t commands;
	sctp_sm_table_entry_t *state_fn;
	...
@@ -701,9 +723,8 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
	DEBUG_POST;

	error = sctp_side_effects(event_type, subtype, state,
-				  ep, asoc, event_arg, status, &commands,
-				  priority);
+				  ep, asoc, event_arg, status, &commands,
+				  gfp);
	DEBUG_POST_SFX;

	return error;
	...
@@ -717,12 +738,12 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
 *****************************************************************/
int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
		      sctp_state_t state,
-		      sctp_endpoint_t *ep, sctp_association_t *asoc,
+		      struct sctp_endpoint *ep, struct sctp_association *asoc,
		      void *event_arg,
		      sctp_disposition_t status,
		      sctp_cmd_seq_t *commands,
-		      int priority)
+		      int gfp)
{
	int error;
	...
@@ -735,7 +756,7 @@ int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, asoc,
					       event_arg, status,
-					       commands, priority)))
+					       commands, gfp)))
		goto bail;

	switch (status) {
	...
@@ -803,25 +824,26 @@ int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
/* This is the side-effect interpreter.  */
int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
-			 sctp_state_t state, sctp_endpoint_t *ep,
-			 sctp_association_t *asoc, void *event_arg,
+			 sctp_state_t state, struct sctp_endpoint *ep,
+			 struct sctp_association *asoc, void *event_arg,
			 sctp_disposition_t status, sctp_cmd_seq_t *commands,
-			 int priority)
+			 int gfp)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
-	sctp_chunk_t *new_obj;
-	sctp_chunk_t *chunk = NULL;
+	struct sctp_chunk *new_obj;
+	struct sctp_chunk *chunk = NULL;
	struct sctp_packet *packet;
	struct list_head *pos;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	sctp_sackhdr_t sackh;
+	int local_cork = 0;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
-		chunk = (sctp_chunk_t *) event_arg;
+		chunk = (struct sctp_chunk *) event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	...
@@ -838,6 +860,10 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
+			if (local_cork) {
+				sctp_outq_uncork(&asoc->outqueue);
+				local_cork = 0;
+			}
			asoc = cmd->obj.ptr;
			/* Register with the endpoint.  */
			sctp_endpoint_add_asoc(ep, asoc);
	...
@@ -852,10 +878,13 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
+			if (local_cork) {
+				sctp_outq_uncork(&asoc->outqueue);
+				local_cork = 0;
+			}
			/* Delete the current association.  */
-			sctp_unhash_established(asoc);
-			sctp_association_free(asoc);
+			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;
	...
@@ -903,7 +932,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
-						      cmd->obj.ptr, priority);
+						      cmd->obj.ptr, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
	...
@@ -911,7 +940,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.ptr)
-					sctp_free_chunk(cmd->obj.ptr);
+					sctp_chunk_free(cmd->obj.ptr);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
	...
@@ -957,9 +986,13 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			break;

		case SCTP_CMD_REPLY:
+			/* If an caller has not already corked, do cork. */
+			if (!asoc->outqueue.cork) {
+				sctp_outq_cork(&asoc->outqueue);
+				local_cork = 1;
+			}
			/* Send a chunk to our peer.  */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
			break;

		case SCTP_CMD_SEND_PKT:
	...
@@ -977,7 +1010,8 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
		case SCTP_CMD_TRANSMIT:
			/* Kick start transmission. */
-			error = sctp_outq_flush(&asoc->outqueue, 0);
+			error = sctp_outq_uncork(&asoc->outqueue);
+			local_cork = 0;
			break;

		case SCTP_CMD_ECN_CE:
	...
@@ -1148,13 +1182,15 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			break;
		};

		if (error)
-			return error;
+			break;
	}

out:
+	if (local_cork)
+		sctp_outq_uncork(&asoc->outqueue);
	return error;
nomem:
	error = -ENOMEM;
-	return error;
+	goto out;
}
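The interpreter changes above introduce a cork/uncork discipline: before queueing the first reply the output queue is corked, and it is uncorked when the command sequence ends (or when the association changes mid-sequence), so that all chunks generated by one inbound event can be bundled into a single packet. A toy illustration of that batching pattern follows; the queue structure and names are invented for illustration only.

    #include <stdio.h>

    struct outq {
            int cork;               /* while set, tail() only queues */
            int pending;            /* chunks waiting to be bundled */
    };

    static void outq_cork(struct outq *q)   { q->cork = 1; }

    static void outq_flush(struct outq *q)
    {
            if (q->pending) {
                    printf("flushing packet with %d bundled chunk(s)\n", q->pending);
                    q->pending = 0;
            }
    }

    static void outq_uncork(struct outq *q) { q->cork = 0; outq_flush(q); }

    static void outq_tail(struct outq *q, const char *chunk)
    {
            printf("queueing %s\n", chunk);
            q->pending++;
            if (!q->cork)           /* uncorked: send immediately */
                    outq_flush(q);
    }

    int main(void)
    {
            struct outq q = { 0, 0 };

            /* One inbound event may generate several replies; cork so they bundle. */
            outq_cork(&q);
            outq_tail(&q, "COOKIE ACK");
            outq_tail(&q, "SACK");
            outq_uncork(&q);        /* end of the command sequence: one packet out */
            return 0;
    }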
net/sctp/sm_statefuns.c  View file @ 3e446c25
	...
@@ -95,13 +95,13 @@
 *
 * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
-				  const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
+				  const struct sctp_association *asoc,
				  const sctp_subtype_t type,
				  void *arg,
				  sctp_cmd_seq_t *commands)
{
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
	struct sctp_ulpevent *ev;

	/* RFC 2960 6.10 Bundling
	...
@@ -179,16 +179,16 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
 *
 * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
					const sctp_subtype_t type,
					void *arg,
					sctp_cmd_seq_t *commands)
{
-	sctp_chunk_t *chunk = arg;
-	sctp_chunk_t *repl;
-	sctp_association_t *new_asoc;
-	sctp_chunk_t *err_chunk;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_chunk *repl;
+	struct sctp_association *new_asoc;
+	struct sctp_chunk *err_chunk;
	struct sctp_packet *packet;
	sctp_unrecognized_param_t *unk_param;
	struct sock *sk;
	...
@@ -212,9 +212,8 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
	 * on the TCP-style socket exceed the max backlog, respond with an
	 * ABORT.
	 */
-	if ((SCTP_SS_LISTENING != sk->state) ||
-	    ((SCTP_SOCKET_TCP == sctp_sk(sk)->type) &&
-	     (sk->ack_backlog >= sk->max_ack_backlog)))
+	if (!sctp_sstate(sk, LISTENING) ||
+	    (sctp_style(sk, TCP) &&
+	     (sk->ack_backlog >= sk->max_ack_backlog)))
		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);

	/* Verify the INIT chunk before processing it. */
	...
@@ -232,7 +231,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
					     ntohs(err_chunk->chunk_hdr->length) -
					     sizeof(sctp_chunkhdr_t));

-			sctp_free_chunk(err_chunk);
+			sctp_chunk_free(err_chunk);

			if (packet) {
				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
	...
@@ -260,8 +259,8 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
	/* The call, sctp_process_init(), can fail on memory allocation.  */
	if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
			       sctp_source(chunk),
			       (sctp_init_chunk_t *)chunk->chunk_hdr,
			       GFP_ATOMIC))
		goto nomem_init;
	...
@@ -303,7 +302,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
		 * parameter type.
		 */
		sctp_addto_chunk(repl, len, unk_param);
-		sctp_free_chunk(err_chunk);
+		sctp_chunk_free(err_chunk);
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
	...
@@ -320,7 +319,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
nomem_ack:
	if (err_chunk)
-		sctp_free_chunk(err_chunk);
+		sctp_chunk_free(err_chunk);
nomem_init:
	sctp_association_free(new_asoc);
nomem:
	...
@@ -355,16 +354,16 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
 *
 * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
-				       const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
+				       const struct sctp_association *asoc,
				       const sctp_subtype_t type,
				       void *arg,
				       sctp_cmd_seq_t *commands)
{
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
	sctp_init_chunk_t *initchunk;
	__u32 init_tag;
-	sctp_chunk_t *err_chunk;
+	struct sctp_chunk *err_chunk;
	struct sctp_packet *packet;
	sctp_disposition_t ret;
	...
@@ -386,7 +385,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
	 * error and close the association by transmitting an ABORT.
	 */
	if (!init_tag) {
-		sctp_chunk_t *reply = sctp_make_abort(asoc, chunk, 0);
+		struct sctp_chunk *reply = sctp_make_abort(asoc, chunk, 0);
		if (!reply)
			goto nomem;
	...
@@ -416,7 +415,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
					     ntohs(err_chunk->chunk_hdr->length) -
					     sizeof(sctp_chunkhdr_t));

-			sctp_free_chunk(err_chunk);
+			sctp_chunk_free(err_chunk);

			if (packet) {
				sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
	...
@@ -514,18 +513,18 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const sctp_endpoint_t *ep,
 *
 * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
-				      const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+				      const struct sctp_association *asoc,
				      const sctp_subtype_t type,
				      void *arg,
				      sctp_cmd_seq_t *commands)
{
-	sctp_chunk_t *chunk = arg;
-	sctp_association_t *new_asoc;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_association *new_asoc;
	sctp_init_chunk_t *peer_init;
-	sctp_chunk_t *repl;
+	struct sctp_chunk *repl;
	struct sctp_ulpevent *ev;
	int error = 0;
-	sctp_chunk_t *err_chk_p;
+	struct sctp_chunk *err_chk_p;

	/* If the packet is an OOTB packet which is temporarily on the
	 * control endpoint, respond with an ABORT.
	...
@@ -623,7 +622,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
return
SCTP_DISPOSITION_CONSUME
;
nomem_ev:
sctp_
free_chunk
(
repl
);
sctp_
chunk_free
(
repl
);
nomem_repl:
nomem_init:
sctp_association_free
(
new_asoc
);
...
...
@@ -652,8 +651,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t
sctp_sf_do_5_1E_ca
(
const
s
ctp_endpoint_
t
*
ep
,
const
s
ctp_association_t
*
asoc
,
sctp_disposition_t
sctp_sf_do_5_1E_ca
(
const
s
truct
sctp_endpoin
t
*
ep
,
const
s
truct
sctp_association
*
asoc
,
const
sctp_subtype_t
type
,
void
*
arg
,
sctp_cmd_seq_t
*
commands
)
{
...
...
@@ -697,14 +696,14 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
 }
 
 /* Generate and sendout a heartbeat packet. */
-sctp_disposition_t sctp_sf_heartbeat(const sctp_endpoint_t *ep,
-				     const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
+				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type, void *arg,
 				     sctp_cmd_seq_t *commands)
 {
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
-	sctp_chunk_t *reply;
+	struct sctp_chunk *reply;
 	sctp_sender_hb_info_t hbinfo;
 	size_t paylen = 0;
...
@@ -730,8 +729,8 @@ sctp_disposition_t sctp_sf_heartbeat(const sctp_endpoint_t *ep,
 }
 
 /* Generate a HEARTBEAT packet on the given transport. */
-sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
...
@@ -740,7 +739,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
...
@@ -795,14 +794,14 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_beat_8_3(const sctp_endpoint_t *ep,
-				    const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
+				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type, void *arg,
 				    sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
-	sctp_chunk_t *reply;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_chunk *reply;
 	size_t paylen = 0;
 
 	/* 8.5 When receiving an SCTP packet, the endpoint MUST ensure
...
@@ -862,13 +861,13 @@ sctp_disposition_t sctp_sf_beat_8_3(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_backbeat_8_3(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	union sctp_addr from_addr;
 	struct sctp_transport *link;
 	sctp_sender_hb_info_t *hbinfo;
...
@@ -919,14 +918,14 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const sctp_endpoint_t *ep,
  * condition.
 */
 static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
-				      sctp_chunk_t *init,
+				      struct sctp_chunk *init,
 				      sctp_cmd_seq_t *commands)
 {
 	int len;
 	struct sctp_packet *pkt;
 	sctp_addr_param_t *addrparm;
 	sctp_errhdr_t *errhdr;
-	sctp_endpoint_t *ep;
+	struct sctp_endpoint *ep;
 	char buffer[sizeof(sctp_errhdr_t) + sizeof(sctp_addr_param_t)];
 
 	/* Build the error on the stack.  We are way to malloc crazy
...
@@ -969,9 +968,9 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
 /* A restart is occurring, check to make sure no new addresses
  * are being added as we may be under a takeover attack.
  */
-static int sctp_sf_check_restart_addrs(const sctp_association_t *new_asoc,
-				       const sctp_association_t *asoc,
-				       sctp_chunk_t *init,
+static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
+				       const struct sctp_association *asoc,
+				       struct sctp_chunk *init,
 				       sctp_cmd_seq_t *commands)
 {
 	struct sctp_transport *new_addr, *addr;
...
@@ -1022,8 +1021,8 @@ static int sctp_sf_check_restart_addrs(const sctp_association_t *new_asoc,
  *
  * Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
  */
-static void sctp_tietags_populate(sctp_association_t *new_asoc,
-				  const sctp_association_t *asoc)
+static void sctp_tietags_populate(struct sctp_association *new_asoc,
+				  const struct sctp_association *asoc)
 {
 	switch (asoc->state) {
...
@@ -1069,8 +1068,8 @@ static void sctp_tietags_populate(sctp_association_t *new_asoc,
  * Returns value representing action to be taken.  These action values
  * correspond to Action/Description values in RFC 2960, Table 2.
  */
-static char sctp_tietags_compare(sctp_association_t *new_asoc,
-				 const sctp_association_t *asoc)
+static char sctp_tietags_compare(struct sctp_association *new_asoc,
+				 const struct sctp_association *asoc)
 {
 	/* In this case, the peer may have restarted. */
 	if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
...
@@ -1106,16 +1105,16 @@ static char sctp_tietags_compare(sctp_association_t *new_asoc,
  * chunk handling.
  */
 static sctp_disposition_t sctp_sf_do_unexpected_init(
-	const sctp_endpoint_t *ep,
-	const sctp_association_t *asoc,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
 	void *arg, sctp_cmd_seq_t *commands)
 {
 	sctp_disposition_t retval;
-	sctp_chunk_t *chunk = arg;
-	sctp_chunk_t *repl;
-	sctp_association_t *new_asoc;
-	sctp_chunk_t *err_chunk;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_chunk *repl;
+	struct sctp_association *new_asoc;
+	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
 	int len;
...
@@ -1191,7 +1190,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 	 * since there are no peer addresses to check against.
 	 * Upon return an ABORT will have been sent if needed.
 	 */
-	if (asoc->state != SCTP_STATE_COOKIE_WAIT) {
+	if (!sctp_state(asoc, COOKIE_WAIT)) {
 		if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
 			retval = SCTP_DISPOSITION_CONSUME;
...
@@ -1238,7 +1237,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 		 * parameter type.
 		 */
 		sctp_addto_chunk(repl, len, unk_param);
-		sctp_free_chunk(err_chunk);
+		sctp_chunk_free(err_chunk);
 	}
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
...
@@ -1254,7 +1253,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 cleanup:
 	if (err_chunk)
-		sctp_free_chunk(err_chunk);
+		sctp_chunk_free(err_chunk);
 	return retval;
 nomem:
 	retval = SCTP_DISPOSITION_NOMEM;
...
@@ -1303,11 +1302,11 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_1_siminit(const sctp_endpoint_t *ep,
-				    const sctp_association_t *asoc,
-				    const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
+				    const struct sctp_association *asoc,
+				    const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands)
 {
 	/* Call helper to do the real work for both simulataneous and
 	 * duplicate INIT chunk handling.
...
@@ -1356,11 +1355,11 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const sctp_endpoint_t *ep,
-				    const sctp_association_t *asoc,
-				    const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
+				    const struct sctp_association *asoc,
+				    const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands)
 {
 	/* Call helper to do the real work for both simulataneous and
 	 * duplicate INIT chunk handling.
...
@@ -1375,15 +1374,15 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const sctp_endpoint_t *ep,
 * Section 5.2.4
 *  A) In this case, the peer may have restarted.
 */
-static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					sctp_chunk_t *chunk,
-					sctp_cmd_seq_t *commands,
-					sctp_association_t *new_asoc)
+static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
+					struct sctp_chunk *chunk,
+					sctp_cmd_seq_t *commands,
+					struct sctp_association *new_asoc)
 {
 	sctp_init_chunk_t *peer_init;
 	struct sctp_ulpevent *ev;
-	sctp_chunk_t *repl;
+	struct sctp_chunk *repl;
 
 	/* new_asoc is a brand-new association, so these are not yet
 	 * side effects--it is safe to run them here.
...
@@ -1391,7 +1390,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
 	peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
 	if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
-			       sctp_source(chunk), peer_init,
+			       sctp_source(chunk), peer_init,
 			       GFP_ATOMIC))
 		goto nomem;
...
@@ -1429,7 +1428,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
 	return SCTP_DISPOSITION_CONSUME;
 nomem_ev:
-	sctp_free_chunk(repl);
+	sctp_chunk_free(repl);
 nomem:
 	return SCTP_DISPOSITION_NOMEM;
 }
...
@@ -1442,22 +1441,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
 *      after responding to the local endpoint's INIT
 */
 /* This case represents an initialization collision. */
-static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					sctp_chunk_t *chunk,
-					sctp_cmd_seq_t *commands,
-					sctp_association_t *new_asoc)
+static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
+					struct sctp_chunk *chunk,
+					sctp_cmd_seq_t *commands,
+					struct sctp_association *new_asoc)
 {
 	sctp_init_chunk_t *peer_init;
 	struct sctp_ulpevent *ev;
-	sctp_chunk_t *repl;
+	struct sctp_chunk *repl;
 
 	/* new_asoc is a brand-new association, so these are not yet
 	 * side effects--it is safe to run them here.
 	 */
 	peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
 	if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
-			       sctp_source(chunk), peer_init,
+			       sctp_source(chunk), peer_init,
 			       GFP_ATOMIC))
 		goto nomem;
...
@@ -1492,7 +1491,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
 	return SCTP_DISPOSITION_CONSUME;
 nomem_ev:
-	sctp_free_chunk(repl);
+	sctp_chunk_free(repl);
 nomem:
 	return SCTP_DISPOSITION_NOMEM;
 }
...
@@ -1506,11 +1505,11 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
 *     but a new tag of its own.
 */
 /* This case represents an initialization collision. */
-static sctp_disposition_t sctp_sf_do_dupcook_c(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					sctp_chunk_t *chunk,
-					sctp_cmd_seq_t *commands,
-					sctp_association_t *new_asoc)
+static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
+					struct sctp_chunk *chunk,
+					sctp_cmd_seq_t *commands,
+					struct sctp_association *new_asoc)
 {
 	/* The cookie should be silently discarded.
 	 * The endpoint SHOULD NOT change states and should leave
...
@@ -1527,14 +1526,14 @@ static sctp_disposition_t sctp_sf_do_dupcook_c(const sctp_endpoint_t *ep,
 *    enter the ESTABLISHED state, if it has not already done so.
 */
 /* This case represents an initialization collision. */
-static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					sctp_chunk_t *chunk,
-					sctp_cmd_seq_t *commands,
-					sctp_association_t *new_asoc)
+static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
+					struct sctp_chunk *chunk,
+					sctp_cmd_seq_t *commands,
+					struct sctp_association *new_asoc)
 {
 	struct sctp_ulpevent *ev = NULL;
-	sctp_chunk_t *repl;
+	struct sctp_chunk *repl;
 
 	/* Clarification from Implementor's Guide:
 	 * D) When both local and remote tags match the endpoint should
...
@@ -1603,18 +1602,18 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type, void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
+					const sctp_subtype_t type, void *arg,
+					sctp_cmd_seq_t *commands)
 {
 	sctp_disposition_t retval;
-	sctp_chunk_t *chunk = arg;
-	sctp_association_t *new_asoc;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_association *new_asoc;
 	int error = 0;
 	char action;
-	sctp_chunk_t *err_chk_p;
+	struct sctp_chunk *err_chk_p;
 
 	/* "Decode" the chunk.  We have no optional parameters so we
 	 * are in good shape.
...
@@ -1701,13 +1700,14 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const sctp_endpoint_t *ep,
  *
  * See sctp_sf_do_9_1_abort().
  */
-sctp_disposition_t sctp_sf_shutdown_pending_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_shutdown_pending_abort(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
...
@@ -1724,13 +1724,13 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(const sctp_endpoint_t *ep,
  *
  * See sctp_sf_do_9_1_abort().
  */
-sctp_disposition_t sctp_sf_shutdown_sent_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
...
@@ -1751,11 +1751,12 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const sctp_endpoint_t *ep,
  *
  * See sctp_sf_do_9_1_abort().
  */
-sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
 	/* The same T2 timer, so we should be able to use
 	 * common function with the SHUTDOWN-SENT state.
...
@@ -1777,13 +1778,13 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_cookie_echoed_err(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	sctp_errhdr_t *err;
 
 	err = (sctp_errhdr_t *)(chunk->skb->data);
...
@@ -1792,7 +1793,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const sctp_endpoint_t *ep,
 	if (1 + asoc->counters[SCTP_COUNTER_INIT_ERROR] > asoc->max_init_attempts) {
 		/* INIT_FAILED will issue an ulpevent. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(err->cause));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
...
@@ -1831,26 +1832,26 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_6_stale(const sctp_endpoint_t *ep,
-					  const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+					  const struct sctp_association *asoc,
 					  const sctp_subtype_t type, void *arg,
 					  sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	time_t stale;
 	sctp_cookie_preserve_param_t bht;
 	sctp_errhdr_t *err;
 	struct list_head *pos;
 	struct sctp_transport *t;
-	sctp_chunk_t *reply;
-	sctp_bind_addr_t *bp;
+	struct sctp_chunk *reply;
+	struct sctp_bind_addr *bp;
 	int attempts;
 
 	attempts = asoc->counters[SCTP_COUNTER_INIT_ERROR] + 1;
 
 	if (attempts >= asoc->max_init_attempts) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
...
@@ -1879,7 +1880,7 @@ sctp_disposition_t sctp_sf_do_5_2_6_stale(const sctp_endpoint_t *ep,
 	bht.lifespan_increment = htonl(stale);
 
 	/* Build that new INIT chunk. */
-	bp = (sctp_bind_addr_t *) &asoc->base.bind_addr;
+	bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
 	reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
 	if (!reply)
 		goto nomem;
...
@@ -1946,20 +1947,20 @@ sctp_disposition_t sctp_sf_do_5_2_6_stale(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_1_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	__u16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
 	if (chunk && (ntohs(chunk->chunk_hdr->length) >=
-		      (sizeof(struct sctp_chunkhdr) +
+		      (sizeof(struct sctp_chunkhdr) +
 		       sizeof(struct sctp_errhdr))))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
...
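The length test in this hunk is what makes it safe to read the first error
cause out of an incoming ABORT: the cause is only trusted when the chunk is at
least one chunk header plus one error-cause header long.  A hedged restatement
of that guard as a standalone helper (illustrative only, not part of the patch):

	/* Sketch: does this ABORT carry at least one complete error cause? */
	static int abort_has_cause(const struct sctp_chunk *chunk)
	{
		return ntohs(chunk->chunk_hdr->length) >=
		       sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr);
	}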
@@ -1977,13 +1978,13 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const sctp_endpoint_t *ep,
  *
  * See sctp_sf_do_9_1_abort() above.
  */
-sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	__u16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
...
@@ -1996,7 +1997,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
 
 	if (chunk && (ntohs(chunk->chunk_hdr->length) >=
-		      (sizeof(struct sctp_chunkhdr) +
+		      (sizeof(struct sctp_chunkhdr) +
 		       sizeof(struct sctp_errhdr))))
 		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
...
@@ -2011,8 +2012,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
  *
  * See sctp_sf_do_9_1_abort() above.
  */
-sctp_disposition_t sctp_sf_cookie_echoed_abort(const sctp_endpoint_t *ep,
-					       const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
+					       const struct sctp_association *asoc,
 					       const sctp_subtype_t type, void *arg,
 					       sctp_cmd_seq_t *commands)
...
@@ -2056,13 +2057,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_shutdown(const sctp_endpoint_t *ep,
-					   const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
+					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type, void *arg,
 					   sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	sctp_shutdownhdr_t *sdh;
 	sctp_disposition_t disposition;
...
@@ -2111,14 +2112,14 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const sctp_endpoint_t *ep,
  * that belong to this association, it should discard the INIT chunk and
 * retransmit the SHUTDOWN ACK chunk.
 */
-sctp_disposition_t sctp_sf_do_9_2_reshutack(const sctp_endpoint_t *ep,
-					    const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
+					    const struct sctp_association *asoc,
 					    const sctp_subtype_t type, void *arg,
 					    sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = (sctp_chunk_t *) arg;
-	sctp_chunk_t *reply;
+	struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
+	struct sctp_chunk *reply;
 
 	reply = sctp_make_shutdown_ack(asoc, chunk);
 	if (NULL == reply)
...
@@ -2165,14 +2166,14 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecn_cwr(const sctp_endpoint_t *ep,
-				      const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
+				      const struct sctp_association *asoc,
 				      const sctp_subtype_t type, void *arg,
 				      sctp_cmd_seq_t *commands)
 {
 	sctp_cwrhdr_t *cwr;
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 
 	/* 8.5 When receiving an SCTP packet, the endpoint MUST ensure
 	 * that the value in the Verification Tag field of the
...
@@ -2221,14 +2222,14 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecne(const sctp_endpoint_t *ep,
-				   const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
+				   const struct sctp_association *asoc,
 				   const sctp_subtype_t type, void *arg,
 				   sctp_cmd_seq_t *commands)
 {
 	sctp_ecnehdr_t *ecne;
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 
 	/* 8.5 When receiving an SCTP packet, the endpoint MUST ensure
 	 * that the value in the Verification Tag field of the
...
@@ -2279,15 +2280,15 @@ sctp_disposition_t sctp_sf_do_ecne(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	sctp_datahdr_t *data_hdr;
-	sctp_chunk_t *err;
+	struct sctp_chunk *err;
 	size_t datalen;
 	sctp_verb_t deliver;
 	int tmp;
...
@@ -2309,9 +2310,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
 
 	tsn = ntohl(data_hdr->tsn);
 	SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
-	SCTP_DEBUG_PRINTK("eat_data: skb->head %p.\n", chunk->skb->head);
 
 	/* ASSERT:  Now skb->data is really the user data. */
...
@@ -2326,11 +2325,15 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 	 */
 	if (!chunk->ecn_ce_done) {
+		struct sctp_af *af;
 		chunk->ecn_ce_done = 1;
-		if (INET_ECN_is_ce(chunk->skb->nh.iph->tos) &&
-		    asoc->peer.ecn_capable) {
+
+		af = sctp_get_af_specific(
+			ipver2af(chunk->skb->nh.iph->version));
+
+		if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
 			/* Do real work as sideffect. */
-			sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+			sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
 					SCTP_U32(tsn));
 		}
 	}
...
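The rewritten ECN test above stops peeking directly at the IPv4 TOS byte and
instead asks the address family (looked up via sctp_get_af_specific()) whether
the packet was CE-marked, which also covers IPv6.  A rough sketch of what an
IPv4 is_ce() callback could look like is below; it is an illustration under
the 2.5-era skb layout, not the exact body from protocol.c:

	/* Hedged sketch of a per-family congestion-experienced check. */
	static int sctp_v4_is_ce_sketch(const struct sk_buff *skb)
	{
		/* IPv4 carries the ECN bits in the TOS byte. */
		return INET_ECN_is_ce(skb->nh.iph->tos);
	}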
@@ -2371,7 +2374,8 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 	 * PMTU.  In cases, such as loopback, this might be a rather
 	 * large spill over.
 	 */
-	if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) {
+	if (!asoc->rwnd || asoc->rwnd_over ||
+	    (datalen > asoc->rwnd + asoc->frag_point)) {
 
 		/* If this is the next TSN, consider reneging to make
 		 * room.   Note: Playing nice with a confused sender.  A
...
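The extra "!asoc->rwnd" clause matters when the advertised receive window has
collapsed to zero: the old test still accepted a small DATA chunk (anything up
to rwnd + frag_point), while the new one takes the drop/renege path right away.
Restated as a hedged standalone predicate with made-up numbers:

	/* Sketch of the window test; not part of the patch itself. */
	static int over_rwnd(__u32 rwnd, int rwnd_over, size_t datalen,
			     __u32 frag_point)
	{
		return !rwnd || rwnd_over || (datalen > rwnd + frag_point);
	}

	/* over_rwnd(0, 0, 100, 1452) -> 1 with the new test (drop), */
	/* whereas the old expression 100 > 0 + 1452 evaluated to 0. */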
@@ -2409,7 +2413,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_DATA));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
...
@@ -2537,15 +2541,15 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
-				     const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
+				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type, void *arg,
 				     sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	sctp_datahdr_t *data_hdr;
-	sctp_chunk_t *err;
+	struct sctp_chunk *err;
 	size_t datalen;
 	int tmp;
 	__u32 tsn;
...
@@ -2581,11 +2585,15 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
 	 * chunk later.
 	 */
 	if (!chunk->ecn_ce_done) {
+		struct sctp_af *af;
 		chunk->ecn_ce_done = 1;
-		if (INET_ECN_is_ce(chunk->skb->nh.iph->tos) &&
-		    asoc->peer.ecn_capable) {
+
+		af = sctp_get_af_specific(
+			ipver2af(chunk->skb->nh.iph->version));
+
+		if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
 			/* Do real work as sideffect. */
-			sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+			sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
 					SCTP_U32(tsn));
 		}
 	}
...
@@ -2625,7 +2633,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
 		 * processing the rest of the chunks in the packet.
 		 */
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_DATA));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
...
@@ -2712,13 +2720,13 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_sack_6_2(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	sctp_sackhdr_t *sackh;
 	__u32 ctsn;
...
@@ -2773,15 +2781,15 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_tabort_8_4_8(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
 	struct sctp_packet *packet = NULL;
-	sctp_chunk_t *chunk = arg;
-	sctp_chunk_t *abort;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_chunk *abort;
 
 	packet = sctp_ootb_pkt_new(asoc, chunk);
...
@@ -2819,13 +2827,13 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_operr_notify(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	struct sctp_ulpevent *ev;
 
 	while (chunk->chunk_end > chunk->skb->data) {
...
@@ -2856,14 +2864,14 @@ sctp_disposition_t sctp_sf_operr_notify(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition.
 */
-sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
-	sctp_chunk_t *reply;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_chunk *reply;
 	struct sctp_ulpevent *ev;
 
 	/* 10.2 H) SHUTDOWN COMPLETE notification
...
@@ -2924,13 +2932,13 @@ sctp_disposition_t sctp_sf_do_9_2_final(const sctp_endpoint_t *ep,
  * TCB was found.  After sending this ABORT, the receiver of the OOTB
 * packet shall discard the OOTB packet and take no further action.
 */
-sctp_disposition_t sctp_sf_ootb(const sctp_endpoint_t *ep,
-				const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
+				const struct sctp_association *asoc,
 				const sctp_subtype_t type, void *arg,
 				sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 	struct sk_buff *skb = chunk->skb;
 	sctp_chunkhdr_t *ch;
 	__u8 *ch_end;
...
@@ -2975,15 +2983,15 @@ sctp_disposition_t sctp_sf_ootb(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_shut_8_4_5(const sctp_endpoint_t *ep,
-				      const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+				      const struct sctp_association *asoc,
 				      const sctp_subtype_t type, void *arg,
 				      sctp_cmd_seq_t *commands)
 {
 	struct sctp_packet *packet = NULL;
-	sctp_chunk_t *chunk = arg;
-	sctp_chunk_t *shut;
+	struct sctp_chunk *chunk = arg;
+	struct sctp_chunk *shut;
 
 	packet = sctp_ootb_pkt_new(asoc, chunk);
...
@@ -3024,8 +3032,8 @@ sctp_disposition_t sctp_sf_shut_8_4_5(const sctp_endpoint_t *ep,
  *   chunks. --piggy ]
  *
 */
-sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const sctp_endpoint_t *ep,
-				      const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
+				      const struct sctp_association *asoc,
 				      const sctp_subtype_t type, void *arg,
 				      sctp_cmd_seq_t *commands)
...
@@ -3061,14 +3069,14 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_unk_chunk(const sctp_endpoint_t *ep,
-				     const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
+				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type, void *arg,
 				     sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *unk_chunk = arg;
-	sctp_chunk_t *err_chunk;
+	struct sctp_chunk *unk_chunk = arg;
+	struct sctp_chunk *err_chunk;
 	sctp_chunkhdr_t *hdr;
 
 	SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk);
...
@@ -3140,8 +3148,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_discard_chunk(const sctp_endpoint_t *ep,
-					 const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
+					 const struct sctp_association *asoc,
 					 const sctp_subtype_t type, void *arg,
 					 sctp_cmd_seq_t *commands)
...
@@ -3169,8 +3177,8 @@ sctp_disposition_t sctp_sf_discard_chunk(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_pdiscard(const sctp_endpoint_t *ep,
-				    const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
+				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type, void *arg,
 				    sctp_cmd_seq_t *commands)
...
@@ -3198,13 +3206,13 @@ sctp_disposition_t sctp_sf_pdiscard(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t lucky(const sctp_endpoint_t *ep,
-			 const sctp_association_t *asoc,
+sctp_disposition_t lucky(const struct sctp_endpoint *ep,
+			 const struct sctp_association *asoc,
 			 const sctp_subtype_t type,
 			 void *arg,
 			 sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 
 	/* 8.5 When receiving an SCTP packet, the endpoint MUST ensure
 	 * that the value in the Verification Tag field of the
...
@@ -3235,13 +3243,13 @@ sctp_disposition_t lucky(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t other_stupid(const sctp_endpoint_t *ep,
-				const sctp_association_t *asoc,
+sctp_disposition_t other_stupid(const struct sctp_endpoint *ep,
+				const struct sctp_association *asoc,
 				const sctp_subtype_t type,
 				void *arg,
 				sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 
 	/* 8.5 When receiving an SCTP packet, the endpoint MUST ensure
 	 * that the value in the Verification Tag field of the
...
@@ -3271,8 +3279,8 @@ sctp_disposition_t other_stupid(const sctp_endpoint_t *ep,
  * We simply tag the chunk as a violation.  The state machine will log
 * the violation and continue.
 */
-sctp_disposition_t sctp_sf_violation(const sctp_endpoint_t *ep,
-				     const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
+				     const struct sctp_association *asoc,
 				     const sctp_subtype_t type, void *arg,
 				     sctp_cmd_seq_t *commands)
...
@@ -3309,7 +3317,7 @@ sctp_disposition_t sctp_sf_violation(const sctp_endpoint_t *ep,
 * will be returned on successful establishment of the association. If
 * SCTP is not able to open an SCTP association with the peer endpoint,
 * an error is returned.
- * [In the kernel implementation, the sctp_association_t needs to
+ * [In the kernel implementation, the struct sctp_association needs to
 * be created BEFORE causing this primitive to run.]
 *
 * Other association parameters may be returned, including the
...
@@ -3340,13 +3348,13 @@ sctp_disposition_t sctp_sf_violation(const sctp_endpoint_t *ep,
  *
  * The return value is a disposition.
 */
-sctp_disposition_t sctp_sf_do_prm_asoc(const sctp_endpoint_t *ep,
-				       const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
+				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type, void *arg,
 				       sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *repl;
+	struct sctp_chunk *repl;
 
 	/* The comment below says that we enter COOKIE-WAIT AFTER
 	 * sending the INIT, but that doesn't actually work in our
...
@@ -3371,7 +3379,7 @@ sctp_disposition_t sctp_sf_do_prm_asoc(const sctp_endpoint_t *ep,
 	 * rerun it through as a sideffect.
 	 */
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC,
-			SCTP_ASOC((sctp_association_t *) asoc));
+			SCTP_ASOC((struct sctp_association *) asoc));
 
 	/* After sending the INIT, "A" starts the T1-init timer and
 	 * enters the COOKIE-WAIT state.
...
@@ -3446,13 +3454,13 @@ sctp_disposition_t sctp_sf_do_prm_asoc(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition.
 */
-sctp_disposition_t sctp_sf_do_prm_send(const sctp_endpoint_t *ep,
-				       const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
+				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type, void *arg,
 				       sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = arg;
+	struct sctp_chunk *chunk = arg;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
 	return SCTP_DISPOSITION_CONSUME;
...
@@ -3484,11 +3492,12 @@ sctp_disposition_t sctp_sf_do_prm_send(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition.
 */
-sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
 	int disposition;
...
@@ -3545,11 +3554,12 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition.
 */
-sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
 	/* From 9.1 Abort of an Association
 	 * Upon receipt of the ABORT primitive from its upper
...
@@ -3560,7 +3570,7 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
 	 * if necessary to fill gaps.
 	 */
 	struct msghdr *msg = arg;
-	sctp_chunk_t *abort;
+	struct sctp_chunk *abort;
 	sctp_disposition_t retval;
 
 	retval = SCTP_DISPOSITION_CONSUME;
...
@@ -3577,7 +3587,7 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
 	 */
 
 	/* Delete the established association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
 
 	SCTP_INC_STATS(SctpAborteds);
...
@@ -3587,8 +3597,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
 }
 
 /* We tried an illegal operation on an association which is closed. */
-sctp_disposition_t sctp_sf_error_closed(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
...
@@ -3600,8 +3610,8 @@ sctp_disposition_t sctp_sf_error_closed(const sctp_endpoint_t *ep,
 /* We tried an illegal operation on an association which is shutting
 * down.
 */
-sctp_disposition_t sctp_sf_error_shutdown(const sctp_endpoint_t *ep,
-					  const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
+					  const struct sctp_association *asoc,
 					  const sctp_subtype_t type, void *arg,
 					  sctp_cmd_seq_t *commands)
...
@@ -3626,8 +3636,8 @@ sctp_disposition_t sctp_sf_error_shutdown(const sctp_endpoint_t *ep,
 * (timers)
 */
 sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
-	const sctp_endpoint_t *ep,
-	const sctp_association_t *asoc,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
 	void *arg,
 	sctp_cmd_seq_t *commands)
...
@@ -3660,8 +3670,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
 * (timers)
 */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
-	const sctp_endpoint_t *ep,
-	const sctp_association_t *asoc,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
 	void *arg,
 	sctp_cmd_seq_t *commands)
 {
...
@@ -3685,14 +3695,15 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
  * Outputs
 * (timers)
 */
-sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
 	struct msghdr *msg = arg;
-	sctp_chunk_t *abort;
+	struct sctp_chunk *abort;
 	sctp_disposition_t retval;
 
 	/* Stop T1-init timer */
...
@@ -3717,7 +3728,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
 	 */
 
 	/* Delete the established association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 			SCTP_U32(SCTP_ERROR_USER_ABORT));
 
 	return retval;
...
@@ -3737,11 +3748,12 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
  * Outputs
 * (timers)
 */
-sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
 	/* There is a single T1 timer, so we should be able to use
 	 * common function with the COOKIE-WAIT state.
...
@@ -3762,8 +3774,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(const sctp_endpoint_t *ep,
 * (timers)
 */
 sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
-	const sctp_endpoint_t *ep,
-	const sctp_association_t *asoc,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
 	void *arg,
 	sctp_cmd_seq_t *commands)
...
@@ -3788,8 +3800,8 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
 * (timers)
 */
 sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
-	const sctp_endpoint_t *ep,
-	const sctp_association_t *asoc,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
 	void *arg,
 	sctp_cmd_seq_t *commands)
...
@@ -3818,8 +3830,8 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
 * (timers)
 */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
-	const sctp_endpoint_t *ep,
-	const sctp_association_t *asoc,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
 	const sctp_subtype_t type,
 	void *arg,
 	sctp_cmd_seq_t *commands)
...
@@ -3853,8 +3865,8 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
 * association on which a heartbeat should be issued.
 */
 sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
-					const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+					const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type,
 					void *arg,
 					sctp_cmd_seq_t *commands)
...
...
@@ -3868,11 +3880,12 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
*
* The return value is the disposition of the primitive.
*/
sctp_disposition_t
sctp_sf_ignore_primitive
(
const
sctp_endpoint_t
*
ep
,
const
sctp_association_t
*
asoc
,
const
sctp_subtype_t
type
,
void
*
arg
,
sctp_cmd_seq_t
*
commands
)
sctp_disposition_t
sctp_sf_ignore_primitive
(
const
struct
sctp_endpoint
*
ep
,
const
struct
sctp_association
*
asoc
,
const
sctp_subtype_t
type
,
void
*
arg
,
sctp_cmd_seq_t
*
commands
)
{
SCTP_DEBUG_PRINTK
(
"Primitive type %d is ignored.
\n
"
,
type
.
primitive
);
return
SCTP_DISPOSITION_DISCARD
;
...
...
@@ -3895,13 +3908,14 @@ sctp_disposition_t sctp_sf_ignore_primitive(const sctp_endpoint_t *ep,
*
* The return value is the disposition.
*/
sctp_disposition_t
sctp_sf_do_9_2_start_shutdown
(
const
sctp_endpoint_t
*
ep
,
const
sctp_association_t
*
asoc
,
const
sctp_subtype_t
type
,
void
*
arg
,
sctp_cmd_seq_t
*
commands
)
sctp_disposition_t
sctp_sf_do_9_2_start_shutdown
(
const
struct
sctp_endpoint
*
ep
,
const
struct
sctp_association
*
asoc
,
const
sctp_subtype_t
type
,
void
*
arg
,
sctp_cmd_seq_t
*
commands
)
{
s
ctp_chunk_t
*
reply
;
s
truct
sctp_chunk
*
reply
;
/* Once all its outstanding data has been acknowledged, the
* endpoint shall send a SHUTDOWN chunk to its peer including
...
...
@@ -3956,14 +3970,15 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition.
 */
-sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *chunk = (sctp_chunk_t *) arg;
-	sctp_chunk_t *reply;
+	struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
+	struct sctp_chunk *reply;
 
 	/* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
 	 * shall send a SHUTDOWN ACK ...
...
@@ -4009,8 +4024,8 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the event.
 */
-sctp_disposition_t sctp_sf_ignore_other(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
...
@@ -4034,8 +4049,8 @@ sctp_disposition_t sctp_sf_ignore_other(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_do_6_3_3_rtx(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
...
@@ -4044,7 +4059,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const sctp_endpoint_t *ep,
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
...
@@ -4102,8 +4117,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const sctp_endpoint_t *ep,
  * allow. However, an SCTP transmitter MUST NOT be more aggressive than
 * the following algorithms allow.
 */
-sctp_disposition_t sctp_sf_do_6_2_sack(const sctp_endpoint_t *ep,
-				       const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
+				       const struct sctp_association *asoc,
 				       const sctp_subtype_t type, void *arg,
 				       sctp_cmd_seq_t *commands)
...
@@ -4137,14 +4152,14 @@ sctp_disposition_t sctp_sf_do_6_2_sack(const sctp_endpoint_t *ep,
  * (timers, events)
  *
 */
-sctp_disposition_t sctp_sf_t1_timer_expire(const sctp_endpoint_t *ep,
-					   const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_t1_timer_expire(const struct sctp_endpoint *ep,
+					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type, void *arg,
 					   sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *repl;
-	sctp_bind_addr_t *bp;
+	struct sctp_chunk *repl;
+	struct sctp_bind_addr *bp;
 	sctp_event_timeout_t timer = (sctp_event_timeout_t) arg;
 	int timeout;
 	int attempts;
...
@@ -4159,7 +4174,7 @@ sctp_disposition_t sctp_sf_t1_timer_expire(const sctp_endpoint_t *ep,
 	    (attempts < asoc->max_init_attempts)) {
 		switch (timer) {
 		case SCTP_EVENT_TIMEOUT_T1_INIT:
-			bp = (sctp_bind_addr_t *) &asoc->base.bind_addr;
+			bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
 			repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
 			break;
...
@@ -4180,7 +4195,7 @@ sctp_disposition_t sctp_sf_t1_timer_expire(const sctp_endpoint_t *ep,
 				SCTP_TO(timer));
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
 	} else {
-		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
...
@@ -4204,18 +4219,18 @@ sctp_disposition_t sctp_sf_t1_timer_expire(const sctp_endpoint_t *ep,
  * the T2-Shutdown timer, giving its peer ample opportunity to transmit
 * all of its queued DATA chunks that have not yet been sent.
 */
-sctp_disposition_t sctp_sf_t2_timer_expire(const sctp_endpoint_t *ep,
-					   const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
+					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type, void *arg,
 					   sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *reply = NULL;
+	struct sctp_chunk *reply = NULL;
 
 	SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
 
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
...
@@ -4264,13 +4279,13 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const sctp_endpoint_t *ep,
 * At the expiration of this timer the sender SHOULD abort the association
 * by sending an ABORT chunk.
 */
-sctp_disposition_t sctp_sf_t5_timer_expire(const sctp_endpoint_t *ep,
-					   const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
+					   const struct sctp_association *asoc,
 					   const sctp_subtype_t type, void *arg,
 					   sctp_cmd_seq_t *commands)
 {
-	sctp_chunk_t *reply = NULL;
+	struct sctp_chunk *reply = NULL;
 
 	SCTP_DEBUG_PRINTK("Timer T5 expired.\n");
...
@@ -4279,7 +4294,7 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const sctp_endpoint_t *ep,
 		goto nomem;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 			SCTP_U32(SCTP_ERROR_NO_ERROR));
 
 	return SCTP_DISPOSITION_DELETE_TCB;
...
@@ -4292,11 +4307,12 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const sctp_endpoint_t *ep,
 * The work that needs to be done is same as when SHUTDOWN is initiated by
 * the user.  So this routine looks same as sctp_sf_do_9_2_prm_shutdown().
 */
-sctp_disposition_t sctp_sf_autoclose_timer_expire(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
-					const sctp_subtype_t type,
-					void *arg,
-					sctp_cmd_seq_t *commands)
+sctp_disposition_t sctp_sf_autoclose_timer_expire(
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
 {
 	int disposition;
...
@@ -4337,8 +4353,8 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_not_impl(const sctp_endpoint_t *ep,
-				    const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
+				    const struct sctp_association *asoc,
 				    const sctp_subtype_t type, void *arg,
 				    sctp_cmd_seq_t *commands)
...
@@ -4354,8 +4370,8 @@ sctp_disposition_t sctp_sf_not_impl(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_bug(const sctp_endpoint_t *ep,
-			       const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
+			       const struct sctp_association *asoc,
 			       const sctp_subtype_t type, void *arg,
 			       sctp_cmd_seq_t *commands)
...
@@ -4374,8 +4390,8 @@ sctp_disposition_t sctp_sf_bug(const sctp_endpoint_t *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_timer_ignore(const sctp_endpoint_t *ep,
-					const sctp_association_t *asoc,
+sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
 					const sctp_subtype_t type, void *arg,
 					sctp_cmd_seq_t *commands)
...
@@ -4389,7 +4405,7 @@ sctp_disposition_t sctp_sf_timer_ignore(const sctp_endpoint_t *ep,
 ********************************************************************/
 
 /* Pull the SACK chunk based on the SACK header. */
-sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *chunk)
+sctp_sackhdr_t *sctp_sm_pull_sack(struct sctp_chunk *chunk)
 {
 	sctp_sackhdr_t *sack;
 	__u16 num_blocks;
...
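"Pulling" the SACK means stripping the fixed SACK header plus its
variable-length tail (gap-ack blocks and duplicate TSNs, each a 32-bit word)
off the skb so that only the remaining chunks stay in the buffer.  A hedged
sketch of that computation, using the sctp_sackhdr_t field names; the real
function body is not reproduced in this hunk:

	static sctp_sackhdr_t *pull_sack_sketch(struct sctp_chunk *chunk)
	{
		sctp_sackhdr_t *sack = (sctp_sackhdr_t *) chunk->skb->data;
		__u16 num_blocks = ntohs(sack->num_gap_ack_blocks);
		__u16 num_dup_tsns = ntohs(sack->num_dup_tsns);

		/* Header plus one word per gap-ack block and per dup TSN. */
		skb_pull(chunk->skb, sizeof(sctp_sackhdr_t) +
			 (num_blocks + num_dup_tsns) * sizeof(__u32));
		return sack;
	}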
@@ -4411,14 +4427,14 @@ sctp_sackhdr_t *sctp_sm_pull_sack(sctp_chunk_t *chunk)
 /* Create an ABORT packet to be sent as a response, with the specified
 * error causes.
 */
-struct sctp_packet *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
-				  const sctp_association_t *asoc,
-				  sctp_chunk_t *chunk,
+struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+				  const struct sctp_association *asoc,
+				  struct sctp_chunk *chunk,
 				  const void *payload,
 				  size_t paylen)
 {
 	struct sctp_packet *packet;
-	sctp_chunk_t *abort;
+	struct sctp_chunk *abort;
 
 	packet = sctp_ootb_pkt_new(asoc, chunk);
...
@@ -4447,8 +4463,8 @@ struct sctp_packet *sctp_abort_pkt_new(const sctp_endpoint_t *ep,
 }
 
 /* Allocate a packet for responding in the OOTB conditions. */
-struct sctp_packet *sctp_ootb_pkt_new(const sctp_association_t *asoc,
-				      const sctp_chunk_t *chunk)
+struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+				      const struct sctp_chunk *chunk)
 {
 	struct sctp_packet *packet;
 	struct sctp_transport *transport;
...
@@ -4512,11 +4528,11 @@ void sctp_ootb_pkt_free(struct sctp_packet *packet)
 }
 
 /* Send a stale cookie error when a invalid COOKIE ECHO chunk is found */
-void sctp_send_stale_cookie_err(const sctp_endpoint_t *ep,
-				const sctp_association_t *asoc,
-				const sctp_chunk_t *chunk,
+void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+				const struct sctp_association *asoc,
+				const struct sctp_chunk *chunk,
 				sctp_cmd_seq_t *commands,
-				sctp_chunk_t *err_chunk)
+				struct sctp_chunk *err_chunk)
 {
 	struct sctp_packet *packet;
...
@@ -4530,6 +4546,6 @@ void sctp_send_stale_cookie_err(const sctp_endpoint_t *ep,
 				SCTP_PACKET(packet));
 			SCTP_INC_STATS(SctpOutCtrlChunks);
 		} else
-			sctp_free_chunk(err_chunk);
+			sctp_chunk_free(err_chunk);
 	}
 }
net/sctp/socket.c
...
...
@@ -55,7 +55,6 @@
 #include <linux/config.h>
 #include <linux/types.h>
-#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
 #include <linux/time.h>
...
@@ -63,6 +62,7 @@
 #include <linux/fcntl.h>
 #include <linux/poll.h>
 #include <linux/init.h>
+#include <linux/crypto.h>
 #include <net/ip.h>
 #include <net/icmp.h>
...
@@ -82,13 +82,14 @@
 /* Forward declarations for internal helper functions. */
 static int sctp_writeable(struct sock *sk);
 static inline int sctp_wspace(struct sctp_association *asoc);
-static inline void sctp_set_owner_w(sctp_chunk_t *chunk);
+static inline void sctp_set_owner_w(struct sctp_chunk *chunk);
 static void sctp_wfree(struct sk_buff *skb);
 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
 				int msg_len);
 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
 static void sctp_wait_for_close(struct sock *sk, long timeo);
 static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
 static int sctp_bindx_add(struct sock *, struct sockaddr_storage *, int);
 static int sctp_bindx_rem(struct sock *, struct sockaddr_storage *, int);
...
@@ -96,31 +97,33 @@ static int sctp_do_bind(struct sock *, union sctp_addr *, int);
 static int sctp_autobind(struct sock *sk);
 static void sctp_sock_migrate(struct sock *, struct sock *,
 			      struct sctp_association *, sctp_socket_type_t);
+static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
 /* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
-sctp_association_t *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
+struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
 {
-	sctp_association_t *asoc = NULL;
+	struct sctp_association *asoc = NULL;
 
-	/* If this is not a UDP-style socket, assoc id should be
+	/* If this is not a UDP-style socket, assoc id should be
 	 * ignored.
 	 */
-	if (SCTP_SOCKET_UDP != sctp_sk(sk)->type) {
+	if (!sctp_style(sk, UDP)) {
 		if (!list_empty(&sctp_sk(sk)->ep->asocs))
 			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
-					  sctp_association_t, asocs);
+					  struct sctp_association, asocs);
 		return asoc;
 	}
 
 	/* First, verify that this is a kernel address. */
 	if (sctp_is_valid_kaddr((unsigned long) id)) {
-		sctp_association_t *temp = (sctp_association_t *) id;
+		struct sctp_association *temp;
 
-		/* Verify that this _is_ an sctp_association_t
+		/* Verify that this _is_ an sctp_association
 		 * data structure and if so, that the socket matches.
 		 */
+		temp = (struct sctp_association *) id;
 		if ((SCTP_ASSOC_EYECATCHER == temp->eyecatcher) &&
 		    (temp->base.sk == sk))
 			asoc = temp;
...
@@ -188,13 +191,12 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
return
af
;
}
/* Bind a local address either to an endpoint or to an association. */
SCTP_STATIC
int
sctp_do_bind
(
struct
sock
*
sk
,
union
sctp_addr
*
addr
,
int
len
)
{
struct
sctp_opt
*
sp
=
sctp_sk
(
sk
);
s
ctp_endpoint_
t
*
ep
=
sp
->
ep
;
s
ctp_bind_addr_t
*
bp
=
&
ep
->
base
.
bind_addr
;
s
truct
sctp_endpoin
t
*
ep
=
sp
->
ep
;
s
truct
sctp_bind_addr
*
bp
=
&
ep
->
base
.
bind_addr
;
struct
sctp_af
*
af
;
unsigned
short
snum
;
int
ret
=
0
;
...
...
@@ -260,7 +262,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
/* Copy back into socket for getsockname() use. */
if
(
!
ret
)
{
inet_sk
(
sk
)
->
sport
=
htons
(
inet_sk
(
sk
)
->
num
);
af
->
to_sk
(
addr
,
sk
);
af
->
to_sk
_saddr
(
addr
,
sk
);
}
return
ret
;
...
...
@@ -450,12 +452,12 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
/* Add these addresses to all associations on this endpoint. */
if
(
retval
>=
0
)
{
struct
list_head
*
pos
;
s
ctp_endpoint_
t
*
ep
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_endpoin
t
*
ep
;
s
truct
sctp_association
*
asoc
;
ep
=
sctp_sk
(
sk
)
->
ep
;
list_for_each
(
pos
,
&
ep
->
asocs
)
{
asoc
=
list_entry
(
pos
,
s
ctp_association_t
,
asocs
);
asoc
=
list_entry
(
pos
,
s
truct
sctp_association
,
asocs
);
sctp_addip_addr_config
(
asoc
,
SCTP_PARAM_ADD_IP
,
...
...
@@ -485,9 +487,9 @@ int sctp_bindx_add(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
int
sctp_bindx_rem
(
struct
sock
*
sk
,
struct
sockaddr_storage
*
addrs
,
int
addrcnt
)
{
struct
sctp_opt
*
sp
=
sctp_sk
(
sk
);
s
ctp_endpoint_
t
*
ep
=
sp
->
ep
;
s
truct
sctp_endpoin
t
*
ep
=
sp
->
ep
;
int
cnt
;
s
ctp_bind_addr_t
*
bp
=
&
ep
->
base
.
bind_addr
;
s
truct
sctp_bind_addr
*
bp
=
&
ep
->
base
.
bind_addr
;
int
retval
=
0
;
union
sctp_addr
saveaddr
;
...
...
@@ -570,12 +572,12 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr_storage *addrs, int addrcnt)
/* Remove these addresses from all associations on this endpoint. */
if
(
retval
>=
0
)
{
struct
list_head
*
pos
;
s
ctp_endpoint_
t
*
ep
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_endpoin
t
*
ep
;
s
truct
sctp_association
*
asoc
;
ep
=
sctp_sk
(
sk
)
->
ep
;
list_for_each
(
pos
,
&
ep
->
asocs
)
{
asoc
=
list_entry
(
pos
,
s
ctp_association_t
,
asocs
);
asoc
=
list_entry
(
pos
,
s
truct
sctp_association
,
asocs
);
sctp_addip_addr_config
(
asoc
,
SCTP_PARAM_DEL_IP
,
addrs
,
addrcnt
);
}
...
...
@@ -637,7 +639,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
/* Alloc space for the address array in kernel memory. */
kaddrs
=
(
struct
sockaddr_storage
*
)
kmalloc
(
addrssize
,
GFP_KERNEL
);
if
(
unlikely
(
NULL
==
kaddrs
))
if
(
unlikely
(
!
kaddrs
))
return
-
ENOMEM
;
if
(
copy_from_user
(
kaddrs
,
addrs
,
addrssize
))
{
...
...
@@ -670,14 +672,45 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
*
* If sd in the close() call is a branched-off socket representing only
* one association, the shutdown is performed on that association only.
*
* 4.1.6 close() - TCP Style Syntax
*
* Applications use close() to gracefully close down an association.
*
* The syntax is:
*
* int close(int sd);
*
* sd - the socket descriptor of the association to be closed.
*
* After an application calls close() on a socket descriptor, no further
* socket operations will succeed on that descriptor.
*
* API 7.1.4 SO_LINGER
*
* An application using the TCP-style socket can use this option to
* perform the SCTP ABORT primitive. The linger option structure is:
*
* struct linger {
* int l_onoff; // option on/off
* int l_linger; // linger time
* };
*
* To enable the option, set l_onoff to 1. If the l_linger value is set
* to 0, calling close() is the same as the ABORT primitive. If the
* value is set to a negative value, the setsockopt() call will return
* an error. If the value is set to a positive value linger_time, the
* close() can be blocked for at most linger_time ms. If the graceful
* shutdown phase does not finish during this period, close() will
* return but the graceful shutdown phase continues in the system.
*/
SCTP_STATIC
void
sctp_close
(
struct
sock
*
sk
,
long
timeout
)
{
s
ctp_endpoint_
t
*
ep
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_endpoin
t
*
ep
;
s
truct
sctp_association
*
asoc
;
struct
list_head
*
pos
,
*
temp
;
SCTP_DEBUG_PRINTK
(
"sctp_close(sk: 0x%p...)
\n
"
,
sk
);
printk
(
"sctp_close(sk: 0x%p, timeout:%ld)
\n
"
,
sk
,
timeout
);
sctp_lock_sock
(
sk
);
sk
->
shutdown
=
SHUTDOWN_MASK
;
...
...
@@ -686,14 +719,35 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
/* Walk all associations on a socket, not on an endpoint. */
list_for_each_safe
(
pos
,
temp
,
&
ep
->
asocs
)
{
asoc
=
list_entry
(
pos
,
sctp_association_t
,
asocs
);
sctp_primitive_SHUTDOWN
(
asoc
,
NULL
);
asoc
=
list_entry
(
pos
,
struct
sctp_association
,
asocs
);
if
(
sctp_style
(
sk
,
TCP
))
{
/* A closed association can still be in the list if
* it belongs to a TCP-style listening socket that is
* not yet accepted. If so, free it. If not, send an
* ABORT or SHUTDOWN based on the linger options.
*/
if
(
sctp_state
(
asoc
,
CLOSED
))
{
sctp_unhash_established
(
asoc
);
sctp_association_free
(
asoc
);
}
else
if
(
test_bit
(
SOCK_LINGER
,
&
sk
->
flags
)
&&
!
sk
->
lingertime
)
sctp_primitive_ABORT
(
asoc
,
NULL
);
else
sctp_primitive_SHUTDOWN
(
asoc
,
NULL
);
}
else
sctp_primitive_SHUTDOWN
(
asoc
,
NULL
);
}
/* Clean up any skbs sitting on the receive queue. */
skb_queue_purge
(
&
sk
->
receive_queue
);
skb_queue_purge
(
&
sctp_sk
(
sk
)
->
pd_lobby
);
/* On a TCP-style socket, block for at most linger_time if set. */
if
(
sctp_style
(
sk
,
TCP
)
&&
timeout
)
sctp_wait_for_close
(
sk
,
timeout
);
/* This will run the backlog queue. */
sctp_release_sock
(
sk
);
...
...
@@ -717,6 +771,16 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
SCTP_DBG_OBJCNT_DEC
(
sock
);
}
/* Handle EPIPE error. */
static
int
sctp_error
(
struct
sock
*
sk
,
int
flags
,
int
err
)
{
if
(
err
==
-
EPIPE
)
err
=
sock_error
(
sk
)
?
:
-
EPIPE
;
if
(
err
==
-
EPIPE
&&
!
(
flags
&
MSG_NOSIGNAL
))
send_sig
(
SIGPIPE
,
current
,
0
);
return
err
;
}
/* API 3.1.3 sendmsg() - UDP Style Syntax
*
* An application uses sendmsg() and recvmsg() calls to transmit data to
...
...
@@ -746,10 +810,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct
msghdr
*
msg
,
int
msg_len
)
{
struct
sctp_opt
*
sp
;
s
ctp_endpoint_
t
*
ep
;
s
ctp_association_t
*
new_asoc
=
NULL
,
*
asoc
=
NULL
;
struct
sctp_transport
*
transport
;
s
ctp_chunk_t
*
chunk
=
NULL
;
s
truct
sctp_endpoin
t
*
ep
;
s
truct
sctp_association
*
new_asoc
=
NULL
,
*
asoc
=
NULL
;
struct
sctp_transport
*
transport
,
*
chunk_tp
;
s
truct
sctp_chunk
*
chunk
;
union
sctp_addr
to
;
struct
sockaddr
*
msg_name
=
NULL
;
struct
sctp_sndrcvinfo
default_sinfo
=
{
0
};
...
...
@@ -761,7 +825,9 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sctp_scope_t
scope
;
long
timeo
;
__u16
sinfo_flags
=
0
;
struct
sk_buff_head
chunks
;
struct
sctp_datamsg
*
datamsg
;
struct
list_head
*
pos
;
int
msg_flags
=
msg
->
msg_flags
;
SCTP_DEBUG_PRINTK
(
"sctp_sendmsg(sk: %p, msg: %p, msg_len: %d)
\n
"
,
sk
,
msg
,
msg_len
);
...
...
@@ -772,6 +838,12 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
SCTP_DEBUG_PRINTK
(
"Using endpoint: %s.
\n
"
,
ep
->
debug_name
);
/* We cannot send a message over a TCP-style listening socket. */
if
(
sctp_style
(
sk
,
TCP
)
&&
sctp_sstate
(
sk
,
LISTENING
))
{
err
=
-
EPIPE
;
goto
out_nounlock
;
}
/* Parse out the SCTP CMSGs. */
err
=
sctp_msghdr_parse
(
msg
,
&
cmsgs
);
...
...
@@ -785,7 +857,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
* the address we will send to.
* For a peeled-off socket, msg_name is ignored.
*/
if
(
(
SCTP_SOCKET_UDP_HIGH_BANDWIDTH
!=
sp
->
type
)
&&
msg
->
msg_name
)
{
if
(
!
sctp_style
(
sk
,
UDP_HIGH_BANDWIDTH
)
&&
msg
->
msg_name
)
{
int
msg_namelen
=
msg
->
msg_namelen
;
err
=
sctp_verify_addr
(
sk
,
(
union
sctp_addr
*
)
msg
->
msg_name
,
...
...
@@ -816,6 +888,12 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
SCTP_DEBUG_PRINTK
(
"msg_len: %Zd, sinfo_flags: 0x%x
\n
"
,
msg_len
,
sinfo_flags
);
/* MSG_EOF or MSG_ABORT cannot be set on a TCP-style socket. */
if
(
sctp_style
(
sk
,
TCP
)
&&
(
sinfo_flags
&
(
MSG_EOF
|
MSG_ABORT
)))
{
err
=
-
EINVAL
;
goto
out_nounlock
;
}
/* If MSG_EOF is set, no data can be sent. Disallow sending zero
* length messages when MSG_EOF|MSG_ABORT is not set.
* If MSG_ABORT is set, the message length could be non zero with
...
...
@@ -827,22 +905,33 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto
out_nounlock
;
}
sctp_lock_sock
(
sk
);
/* If MSG_ADDR_OVER is set, there must be an address
* specified in msg_name.
*/
if
((
sinfo_flags
&
MSG_ADDR_OVER
)
&&
(
!
msg
->
msg_name
))
{
err
=
-
EINVAL
;
goto
out_nounlock
;
}
transport
=
NULL
;
SCTP_DEBUG_PRINTK
(
"About to look up association.
\n
"
);
sctp_lock_sock
(
sk
);
/* If a msg_name has been specified, assume this is to be used. */
if
(
msg_name
)
{
/* Look for a matching association on the endpoint. */
asoc
=
sctp_endpoint_lookup_assoc
(
ep
,
&
to
,
&
transport
);
if
(
!
asoc
)
{
/* If we could not find a matching association on the
* endpoint, make sure that there is no peeled-off
* association on another socket.
* endpoint, make sure that it is not a TCP-style
* socket that already has an association or there is
* no peeled-off association on another socket.
*/
if
(
sctp_endpoint_is_peeled_off
(
ep
,
&
to
))
{
if
((
sctp_style
(
sk
,
TCP
)
&&
sctp_sstate
(
sk
,
ESTABLISHED
))
||
sctp_endpoint_is_peeled_off
(
ep
,
&
to
))
{
err
=
-
EADDRNOTAVAIL
;
goto
out_unlock
;
}
...
...
@@ -850,14 +939,24 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
else
{
asoc
=
sctp_id2assoc
(
sk
,
associd
);
if
(
!
asoc
)
{
err
=
-
E
INVAL
;
err
=
-
E
PIPE
;
goto
out_unlock
;
}
}
if
(
asoc
)
{
SCTP_DEBUG_PRINTK
(
"Just looked up association: "
"%s.
\n
"
,
asoc
->
debug_name
);
SCTP_DEBUG_PRINTK
(
"Just looked up association: %p.
\n
"
,
asoc
);
/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
* socket that has an association in CLOSED state. This can
* happen when an accepted socket has an association that is
* already CLOSED.
*/
if
(
sctp_state
(
asoc
,
CLOSED
)
&&
sctp_style
(
sk
,
TCP
))
{
err
=
-
EPIPE
;
goto
out_unlock
;
}
if
(
sinfo_flags
&
MSG_EOF
)
{
SCTP_DEBUG_PRINTK
(
"Shutting down association: %p
\n
"
,
asoc
);
...
...
@@ -1004,69 +1103,76 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto
out_free
;
}
/* Break the message into multiple chunks of maximum size. */
skb_queue_head_init
(
&
chunks
);
err
=
sctp_datachunks_from_user
(
asoc
,
sinfo
,
msg
,
msg_len
,
&
chunks
);
if
(
err
)
goto
out_free
;
/* If an address is passed with the sendto/sendmsg call, it is used
* to override the primary destination address in the TCP model, or
* when MSG_ADDR_OVER flag is set in the UDP model.
*/
if
((
sctp_style
(
sk
,
TCP
)
&&
msg_name
)
||
(
sinfo_flags
&
MSG_ADDR_OVER
))
{
chunk_tp
=
sctp_assoc_lookup_paddr
(
asoc
,
&
to
);
if
(
!
chunk_tp
)
{
err
=
-
EINVAL
;
goto
out_free
;
}
}
else
chunk_tp
=
NULL
;
/* Auto-connect, if we aren't connected already. */
if
(
SCTP_STATE_CLOSED
==
asoc
->
state
)
{
if
(
sctp_state
(
asoc
,
CLOSED
)
)
{
err
=
sctp_primitive_ASSOCIATE
(
asoc
,
NULL
);
if
(
err
<
0
)
goto
out_free
;
SCTP_DEBUG_PRINTK
(
"We associated primitively.
\n
"
);
}
/* Break the message into multiple chunks of maximum size. */
datamsg
=
sctp_datamsg_from_user
(
asoc
,
sinfo
,
msg
,
msg_len
);
if
(
!
datamsg
)
{
err
=
-
ENOMEM
;
goto
out_free
;
}
/* Now send the (possibly) fragmented message. */
while
((
chunk
=
(
sctp_chunk_t
*
)
__skb_dequeue
(
&
chunks
)))
{
list_for_each
(
pos
,
&
datamsg
->
chunks
)
{
chunk
=
list_entry
(
pos
,
struct
sctp_chunk
,
frag_list
);
sctp_datamsg_track
(
chunk
);
/* Do accounting for the write space. */
sctp_set_owner_w
(
chunk
);
/* This flag, in the UDP model, requests the SCTP stack to
* override the primary destination address with the
* address found with the sendto/sendmsg call.
*/
if
(
sinfo_flags
&
MSG_ADDR_OVER
)
{
if
(
!
msg
->
msg_name
)
{
err
=
-
EINVAL
;
goto
out_free
;
}
chunk
->
transport
=
sctp_assoc_lookup_paddr
(
asoc
,
&
to
);
if
(
!
chunk
->
transport
)
{
err
=
-
EINVAL
;
goto
out_free
;
}
}
chunk
->
transport
=
chunk_tp
;
/* Send it to the lower layers. */
sctp_primitive_SEND
(
asoc
,
chunk
);
/* Send it to the lower layers. Note: all chunks
* must either fail or succeed. The lower layer
* works that way today. Keep it that way or this
* breaks.
*/
err
=
sctp_primitive_SEND
(
asoc
,
chunk
);
/* Did the lower layer accept the chunk? */
if
(
err
)
sctp_chunk_free
(
chunk
);
SCTP_DEBUG_PRINTK
(
"We sent primitively.
\n
"
);
}
if
(
!
err
)
{
sctp_datamsg_free
(
datamsg
);
if
(
err
)
goto
out_free
;
else
err
=
msg_len
;
goto
out_unlock
;
}
/* If we are already past ASSOCIATE, the lower
* layers are responsible for association cleanup.
*/
goto
out_
free_chun
k
;
goto
out_
unloc
k
;
out_free:
if
(
new_asoc
)
sctp_association_free
(
asoc
);
out_free_chunk:
if
(
chunk
)
sctp_free_chunk
(
chunk
);
out_unlock:
sctp_release_sock
(
sk
);
out_nounlock:
return
err
;
return
sctp_error
(
sk
,
msg_flags
,
err
)
;
#if 0
do_sock_err:
...
...
@@ -1134,8 +1240,9 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
*/
static
struct
sk_buff
*
sctp_skb_recv_datagram
(
struct
sock
*
,
int
,
int
,
int
*
);
SCTP_STATIC
int
sctp_recvmsg
(
struct
kiocb
*
iocb
,
struct
sock
*
sk
,
struct
msghdr
*
msg
,
int
len
,
int
noblock
,
int
flags
,
int
*
addr_len
)
SCTP_STATIC
int
sctp_recvmsg
(
struct
kiocb
*
iocb
,
struct
sock
*
sk
,
struct
msghdr
*
msg
,
int
len
,
int
noblock
,
int
flags
,
int
*
addr_len
)
{
struct
sctp_ulpevent
*
event
=
NULL
;
struct
sctp_opt
*
sp
=
sctp_sk
(
sk
);
...
...
@@ -1151,12 +1258,11 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr
sctp_lock_sock
(
sk
);
if
((
SCTP_SOCKET_TCP
==
sp
->
type
)
&&
(
SCTP_SS_ESTABLISHED
!=
sk
->
state
))
{
if
(
sctp_style
(
sk
,
TCP
)
&&
!
sctp_sstate
(
sk
,
ESTABLISHED
))
{
err
=
-
ENOTCONN
;
goto
out
;
}
skb
=
sctp_skb_recv_datagram
(
sk
,
flags
,
noblock
,
&
err
);
if
(
!
skb
)
goto
out
;
...
...
@@ -1259,7 +1365,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
struct
sctp_opt
*
sp
=
sctp_sk
(
sk
);
/* Applicable to UDP-style socket only */
if
(
SCTP_SOCKET_TCP
==
sp
->
type
)
if
(
sctp_style
(
sk
,
TCP
)
)
return
-
EOPNOTSUPP
;
if
(
optlen
!=
sizeof
(
int
))
return
-
EINVAL
;
...
...
@@ -1271,10 +1377,10 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char *optval,
}
static
int
sctp_setsockopt_peer_addr_params
(
struct
sock
*
sk
,
char
*
optval
,
int
optlen
)
char
*
optval
,
int
optlen
)
{
struct
sctp_paddrparams
params
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_association
*
asoc
;
union
sctp_addr
*
addr
;
struct
sctp_transport
*
trans
;
int
error
;
...
...
@@ -1329,8 +1435,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
return
0
;
}
static
int
sctp_setsockopt_initmsg
(
struct
sock
*
sk
,
char
*
optval
,
int
optlen
)
static
int
sctp_setsockopt_initmsg
(
struct
sock
*
sk
,
char
*
optval
,
int
optlen
)
{
if
(
optlen
!=
sizeof
(
struct
sctp_initmsg
))
return
-
EINVAL
;
...
...
@@ -1340,7 +1445,6 @@ static int sctp_setsockopt_initmsg(struct sock *sk, char *optval,
}
/*
*
* 7.1.15 Set default send parameters (SET_DEFAULT_SEND_PARAM)
*
* Applications that wish to use the sendto() system call may wish to
...
...
@@ -1358,7 +1462,7 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
char
*
optval
,
int
optlen
)
{
struct
sctp_sndrcvinfo
info
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_association
*
asoc
;
if
(
optlen
!=
sizeof
(
struct
sctp_sndrcvinfo
))
return
-
EINVAL
;
...
...
@@ -1413,7 +1517,6 @@ static int sctp_setsockopt_peer_prim(struct sock *sk, char *optval, int optlen)
}
/*
*
* 7.1.5 SCTP_NODELAY
*
* Turn on/off any Nagle-like algorithm. This means that packets are
...
...
@@ -1424,15 +1527,61 @@ static int sctp_setsockopt_peer_prim(struct sock *sk, char *optval, int optlen)
static
int
sctp_setsockopt_nodelay
(
struct
sock
*
sk
,
char
*
optval
,
int
optlen
)
{
__u8
val
;
int
val
;
if
(
optlen
<
sizeof
(
__u8
))
if
(
optlen
<
sizeof
(
int
))
return
-
EINVAL
;
if
(
get_user
(
val
,
(
__u8
*
)
optval
))
if
(
get_user
(
val
,
(
int
*
)
optval
))
return
-
EFAULT
;
sctp_sk
(
sk
)
->
nodelay
=
(
val
==
0
)
?
0
:
1
;
return
0
;
}
/*
* 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
*
* This socket option is a boolean flag which turns on or off mapped V4
* addresses. If this option is turned on and the socket is type
* PF_INET6, then IPv4 addresses will be mapped to V6 representation.
* If this option is turned off, then no mapping will be done of V4
* addresses and a user will receive both PF_INET6 and PF_INET type
* addresses on the socket.
*/
static
int
sctp_setsockopt_mappedv4
(
struct
sock
*
sk
,
char
*
optval
,
int
optlen
)
{
int
val
;
if
(
optlen
<
sizeof
(
int
))
return
-
EINVAL
;
if
(
get_user
(
val
,
(
int
*
)
optval
))
return
-
EFAULT
;
/* FIXME: Put real support here. */
return
-
ENOPROTOOPT
;
}
/*
* 7.1.17 Set the maximum fragrmentation size (SCTP_MAXSEG)
*
* This socket option specifies the maximum size to put in any outgoing
* SCTP chunk. If a message is larger than this size it will be
* fragmented by SCTP into the specified size. Note that the underlying
* SCTP implementation may fragment into smaller sized chunks when the
* PMTU of the underlying association is smaller than the value set by
* the user.
*/
static
int
sctp_setsockopt_maxseg
(
struct
sock
*
sk
,
char
*
optval
,
int
optlen
)
{
int
val
;
if
(
optlen
<
sizeof
(
int
))
return
-
EINVAL
;
if
(
get_user
(
val
,
(
int
*
)
optval
))
return
-
EFAULT
;
if
((
val
<
8
)
||
(
val
>
SCTP_MAX_CHUNK_LEN
))
return
-
EINVAL
;
sctp_sk
(
sk
)
->
user_frag
=
val
;
return
0
;
}
...
...
@@ -1526,20 +1675,22 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
case
SCTP_INITMSG
:
retval
=
sctp_setsockopt_initmsg
(
sk
,
optval
,
optlen
);
break
;
case
SCTP_SET_DEFAULT_SEND_PARAM
:
retval
=
sctp_setsockopt_default_send_param
(
sk
,
optval
,
optlen
);
break
;
case
SCTP_SET_PEER_PRIMARY_ADDR
:
retval
=
sctp_setsockopt_peer_prim
(
sk
,
optval
,
optlen
);
break
;
case
SCTP_NODELAY
:
retval
=
sctp_setsockopt_nodelay
(
sk
,
optval
,
optlen
);
break
;
case
SCTP_I_WANT_MAPPED_V4_ADDR
:
retval
=
sctp_setsockopt_mappedv4
(
sk
,
optval
,
optlen
);
break
;
case
SCTP_MAXSEG
:
retval
=
sctp_setsockopt_maxseg
(
sk
,
optval
,
optlen
);
break
;
default:
retval
=
-
ENOPROTOOPT
;
break
;
...
...
@@ -1572,10 +1723,11 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
int
addr_len
)
{
struct
sctp_opt
*
sp
;
s
ctp_endpoint_
t
*
ep
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_endpoin
t
*
ep
;
s
truct
sctp_association
*
asoc
;
struct
sctp_transport
*
transport
;
union
sctp_addr
to
;
struct
sctp_af
*
af
;
sctp_scope_t
scope
;
long
timeo
;
int
err
=
0
;
...
...
@@ -1590,12 +1742,11 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
/* connect() cannot be done on a socket that is already in ESTABLISHED
* state - UDP-style peeled off socket or a TCP-style socket that
* is already connected.
* is already connected.
* It cannot be done even on a TCP-style listening socket.
*/
if
((
SCTP_SS_ESTABLISHED
==
sk
->
state
)
||
((
SCTP_SOCKET_TCP
==
sp
->
type
)
&&
(
SCTP_SS_LISTENING
==
sk
->
state
)))
{
if
(
sctp_sstate
(
sk
,
ESTABLISHED
)
||
(
sctp_style
(
sk
,
TCP
)
&&
sctp_sstate
(
sk
,
LISTENING
)))
{
err
=
-
EISCONN
;
goto
out_unlock
;
}
...
...
@@ -1663,6 +1814,11 @@ SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr,
goto
out_unlock
;
}
/* Initialize sk's dport and daddr for getpeername() */
inet_sk
(
sk
)
->
dport
=
htons
(
asoc
->
peer
.
port
);
af
=
sctp_get_af_specific
(
to
.
sa
.
sa_family
);
af
->
to_sk_daddr
(
&
to
,
sk
);
timeo
=
sock_sndtimeo
(
sk
,
sk
->
socket
->
file
->
f_flags
&
O_NONBLOCK
);
err
=
sctp_wait_for_connect
(
asoc
,
&
timeo
);
...
...
@@ -1690,21 +1846,21 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
struct
sctp_opt
*
sp
;
struct
sctp_endpoint
*
ep
;
struct
sock
*
newsk
=
NULL
;
struct
sctp_association
*
as
s
oc
;
struct
sctp_association
*
asoc
;
long
timeo
;
int
error
=
0
;
sctp_lock_sock
(
sk
);
sp
=
sctp_sk
(
sk
);
ep
=
sp
->
ep
;
if
(
SCTP_SOCKET_TCP
!=
sp
->
type
)
{
if
(
!
sctp_style
(
sk
,
TCP
)
)
{
error
=
-
EOPNOTSUPP
;
goto
out
;
}
if
(
SCTP_SS_LISTENING
!=
sk
->
state
)
{
if
(
!
sctp_sstate
(
sk
,
LISTENING
)
)
{
error
=
-
EINVAL
;
goto
out
;
}
...
...
@@ -1715,21 +1871,21 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
if
(
error
)
goto
out
;
/* We treat the list of associations on the endpoint as the accept
* queue and pick the first association on the list.
/* We treat the list of associations on the endpoint as the accept
* queue and pick the first association on the list.
*/
as
s
oc
=
list_entry
(
ep
->
asocs
.
next
,
struct
sctp_association
,
asocs
);
asoc
=
list_entry
(
ep
->
asocs
.
next
,
struct
sctp_association
,
asocs
);
newsk
=
sp
->
pf
->
create_accept_sk
(
sk
,
as
soc
);
newsk
=
sp
->
pf
->
create_accept_sk
(
sk
,
as
oc
);
if
(
!
newsk
)
{
error
=
-
ENOMEM
;
goto
out
;
}
/* Populate the fields of the newsk from the oldsk and migrate the
* as
s
oc to the newsk.
*/
sctp_sock_migrate
(
sk
,
newsk
,
as
s
oc
,
SCTP_SOCKET_TCP
);
* asoc to the newsk.
*/
sctp_sock_migrate
(
sk
,
newsk
,
asoc
,
SCTP_SOCKET_TCP
);
out:
sctp_release_sock
(
sk
);
...
...
@@ -1737,10 +1893,10 @@ SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err)
return
newsk
;
}
/*
FIXME: Write Comments
. */
/*
The SCTP ioctl handler
. */
SCTP_STATIC
int
sctp_ioctl
(
struct
sock
*
sk
,
int
cmd
,
unsigned
long
arg
)
{
return
-
E
OPNOTSUPP
;
/* STUB */
return
-
E
NOIOCTLCMD
;
}
/* This is the function which gets called during socket creation to
...
...
@@ -1749,7 +1905,7 @@ SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
*/
SCTP_STATIC
int
sctp_init_sock
(
struct
sock
*
sk
)
{
s
ctp_endpoint_
t
*
ep
;
s
truct
sctp_endpoin
t
*
ep
;
struct
sctp_protocol
*
proto
;
struct
sctp_opt
*
sp
;
...
...
@@ -1800,7 +1956,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
* enable the events needed. By default, UDP-style
* sockets enable io and association change notifications.
*/
if
(
SCTP_SOCKET_UDP
==
sp
->
type
)
{
if
(
sctp_style
(
sk
,
UDP
)
)
{
sp
->
subscribe
.
sctp_data_io_event
=
1
;
sp
->
subscribe
.
sctp_association_event
=
1
;
}
...
...
@@ -1819,12 +1975,19 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* Turn on/off any Nagle-like algorithm. */
sp
->
nodelay
=
1
;
/* Enable by default. */
sp
->
v4mapped
=
1
;
/* Auto-close idle associations after the configured
* number of seconds. A value of 0 disables this
* feature. Configure through the SCTP_AUTOCLOSE socket option,
* for UDP-style sockets only.
*/
sp
->
autoclose
=
0
;
/* User specified fragmentation limit. */
sp
->
user_frag
=
0
;
sp
->
pf
=
sctp_get_pf_specific
(
sk
->
family
);
/* Control variables for partial data delivery. */
...
...
@@ -1835,11 +1998,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
* change the data structure relationships, this may still
* be useful for storing pre-connect address information.
*/
ep
=
sctp_endpoint_new
(
proto
,
sk
,
GFP_KERNEL
);
if
(
NULL
==
ep
)
ep
=
sctp_endpoint_new
(
sk
,
GFP_KERNEL
);
if
(
!
ep
)
return
-
ENOMEM
;
sp
->
ep
=
ep
;
sp
->
hmac
=
NULL
;
SCTP_DBG_OBJCNT_INC
(
sock
);
return
0
;
...
...
@@ -1848,7 +2012,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* Cleanup any SCTP per socket resources. */
SCTP_STATIC
int
sctp_destroy_sock
(
struct
sock
*
sk
)
{
s
ctp_endpoint_
t
*
ep
;
s
truct
sctp_endpoin
t
*
ep
;
SCTP_DEBUG_PRINTK
(
"sctp_destroy_sock(sk: %p)
\n
"
,
sk
);
...
...
@@ -1859,11 +2023,38 @@ SCTP_STATIC int sctp_destroy_sock(struct sock *sk)
return
0
;
}
/* FIXME: Comments needed. */
/* API 4.1.7 shutdown() - TCP Style Syntax
* int shutdown(int socket, int how);
*
* sd - the socket descriptor of the association to be closed.
* how - Specifies the type of shutdown. The values are
* as follows:
* SHUT_RD
* Disables further receive operations. No SCTP
* protocol action is taken.
* SHUT_WR
* Disables further send operations, and initiates
* the SCTP shutdown sequence.
* SHUT_RDWR
* Disables further send and receive operations
* and initiates the SCTP shutdown sequence.
*/
SCTP_STATIC
void
sctp_shutdown
(
struct
sock
*
sk
,
int
how
)
{
/* UDP-style sockets do not support shutdown. */
/* STUB */
struct
sctp_endpoint
*
ep
;
struct
sctp_association
*
asoc
;
if
(
!
sctp_style
(
sk
,
TCP
))
return
;
if
(
how
&
SEND_SHUTDOWN
)
{
ep
=
sctp_sk
(
sk
)
->
ep
;
if
(
!
list_empty
(
&
ep
->
asocs
))
{
asoc
=
list_entry
(
ep
->
asocs
.
next
,
struct
sctp_association
,
asocs
);
sctp_primitive_SHUTDOWN
(
asoc
,
NULL
);
}
}
}
/* 7.2.1 Association Status (SCTP_STATUS)
...
...
@@ -1877,7 +2068,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
int
*
optlen
)
{
struct
sctp_status
status
;
s
ctp_association_t
*
as
soc
=
NULL
;
s
truct
sctp_association
*
a
soc
=
NULL
;
struct
sctp_transport
*
transport
;
sctp_assoc_t
associd
;
int
retval
=
0
;
...
...
@@ -1893,22 +2084,26 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char *optval,
}
associd
=
status
.
sstat_assoc_id
;
as
s
oc
=
sctp_id2assoc
(
sk
,
associd
);
if
(
!
as
s
oc
)
{
asoc
=
sctp_id2assoc
(
sk
,
associd
);
if
(
!
asoc
)
{
retval
=
-
EINVAL
;
goto
out
;
}
transport
=
assoc
->
peer
.
primary_path
;
status
.
sstat_assoc_id
=
sctp_assoc2id
(
assoc
);
status
.
sstat_state
=
assoc
->
state
;
status
.
sstat_rwnd
=
assoc
->
peer
.
rwnd
;
status
.
sstat_unackdata
=
assoc
->
unack_data
;
status
.
sstat_penddata
=
assoc
->
peer
.
tsn_map
.
pending_data
;
status
.
sstat_instrms
=
assoc
->
c
.
sinit_max_instreams
;
status
.
sstat_outstrms
=
assoc
->
c
.
sinit_num_ostreams
;
status
.
sstat_fragmentation_point
=
assoc
->
frag_point
;
transport
=
asoc
->
peer
.
primary_path
;
status
.
sstat_assoc_id
=
sctp_assoc2id
(
asoc
);
status
.
sstat_state
=
asoc
->
state
;
status
.
sstat_rwnd
=
asoc
->
peer
.
rwnd
;
status
.
sstat_unackdata
=
asoc
->
unack_data
;
status
.
sstat_penddata
=
asoc
->
peer
.
tsn_map
.
pending_data
;
status
.
sstat_instrms
=
asoc
->
c
.
sinit_max_instreams
;
status
.
sstat_outstrms
=
asoc
->
c
.
sinit_num_ostreams
;
/* Just in time frag_point update. */
if
(
sctp_sk
(
sk
)
->
user_frag
)
asoc
->
frag_point
=
min_t
(
int
,
asoc
->
frag_point
,
sctp_sk
(
sk
)
->
user_frag
);
status
.
sstat_fragmentation_point
=
asoc
->
frag_point
;
status
.
sstat_primary
.
spinfo_assoc_id
=
sctp_assoc2id
(
transport
->
asoc
);
memcpy
(
&
status
.
sstat_primary
.
spinfo_address
,
&
(
transport
->
ipaddr
),
sizeof
(
union
sctp_addr
));
...
...
@@ -1965,7 +2160,7 @@ static int sctp_getsockopt_set_events(struct sock *sk, int len, char *optval, in
static
int
sctp_getsockopt_autoclose
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
/* Applicable to UDP-style socket only */
if
(
SCTP_SOCKET_TCP
==
sctp_sk
(
sk
)
->
type
)
if
(
sctp_style
(
sk
,
TCP
)
)
return
-
EOPNOTSUPP
;
if
(
len
!=
sizeof
(
int
))
return
-
EINVAL
;
...
...
@@ -1975,33 +2170,29 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char *optval, int
}
/* Helper routine to branch off an association to a new socket. */
SCTP_STATIC
int
sctp_do_peeloff
(
sctp_association_t
*
assoc
,
struct
socket
**
newsock
)
SCTP_STATIC
int
sctp_do_peeloff
(
struct
sctp_association
*
asoc
,
struct
socket
**
sockp
)
{
struct
sock
*
oldsk
=
assoc
->
base
.
sk
;
struct
sock
*
newsk
;
struct
socket
*
tmpsock
;
struct
sock
*
sk
=
asoc
->
base
.
sk
;
struct
socket
*
sock
;
int
err
=
0
;
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
if
(
SCTP_SOCKET_UDP
!=
sctp_sk
(
oldsk
)
->
type
)
return
-
E
OPNOTSUPP
;
if
(
!
sctp_style
(
sk
,
UDP
)
)
return
-
E
INVAL
;
/* Create a new socket. */
err
=
sock_create
(
oldsk
->
family
,
SOCK_SEQPACKET
,
IPPROTO_SCTP
,
&
tmpsock
);
err
=
sock_create
(
sk
->
family
,
SOCK_SEQPACKET
,
IPPROTO_SCTP
,
&
sock
);
if
(
err
<
0
)
return
err
;
newsk
=
tmpsock
->
sk
;
/* Populate the fields of the newsk from the oldsk and migrate the
* assoc to the newsk.
*/
sctp_sock_migrate
(
oldsk
,
newsk
,
assoc
,
SCTP_SOCKET_UDP_HIGH_BANDWIDTH
);
*
newsock
=
tmpsock
;
* asoc to the newsk.
*/
sctp_sock_migrate
(
sk
,
sock
->
sk
,
asoc
,
SCTP_SOCKET_UDP_HIGH_BANDWIDTH
);
*
sockp
=
sock
;
return
err
;
}
...
...
@@ -2011,22 +2202,22 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char *optval, int *
sctp_peeloff_arg_t
peeloff
;
struct
socket
*
newsock
;
int
retval
=
0
;
s
ctp_association_t
*
as
soc
;
s
truct
sctp_association
*
a
soc
;
if
(
len
!=
sizeof
(
sctp_peeloff_arg_t
))
return
-
EINVAL
;
if
(
copy_from_user
(
&
peeloff
,
optval
,
len
))
return
-
EFAULT
;
as
s
oc
=
sctp_id2assoc
(
sk
,
peeloff
.
associd
);
if
(
NULL
==
as
soc
)
{
asoc
=
sctp_id2assoc
(
sk
,
peeloff
.
associd
);
if
(
!
a
soc
)
{
retval
=
-
EINVAL
;
goto
out
;
}
SCTP_DEBUG_PRINTK
(
"%s: sk: %p as
soc: %p
\n
"
,
__FUNCTION__
,
sk
,
as
soc
);
SCTP_DEBUG_PRINTK
(
"%s: sk: %p as
oc: %p
\n
"
,
__FUNCTION__
,
sk
,
a
soc
);
retval
=
sctp_do_peeloff
(
as
s
oc
,
&
newsock
);
retval
=
sctp_do_peeloff
(
asoc
,
&
newsock
);
if
(
retval
<
0
)
goto
out
;
...
...
@@ -2037,8 +2228,8 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char *optval, int *
goto
out
;
}
SCTP_DEBUG_PRINTK
(
"%s: sk: %p as
s
oc: %p newsk: %p sd: %d
\n
"
,
__FUNCTION__
,
sk
,
as
s
oc
,
newsock
->
sk
,
retval
);
SCTP_DEBUG_PRINTK
(
"%s: sk: %p asoc: %p newsk: %p sd: %d
\n
"
,
__FUNCTION__
,
sk
,
asoc
,
newsock
->
sk
,
retval
);
/* Return the fd mapped to the new socket. */
peeloff
.
sd
=
retval
;
...
...
@@ -2049,11 +2240,11 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char *optval, int *
return
retval
;
}
static
int
sctp_getsockopt_peer_addr_params
(
struct
sock
*
sk
,
int
len
,
static
int
sctp_getsockopt_peer_addr_params
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
struct
sctp_paddrparams
params
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_association
*
asoc
;
union
sctp_addr
*
addr
;
struct
sctp_transport
*
trans
;
...
...
@@ -2088,7 +2279,9 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
if
(
copy_to_user
(
optval
,
&
params
,
len
))
return
-
EFAULT
;
*
optlen
=
len
;
if
(
put_user
(
len
,
optlen
))
return
-
EFAULT
;
return
0
;
}
...
...
@@ -2102,11 +2295,11 @@ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char *optval, int *
return
0
;
}
static
int
sctp_getsockopt_peer_addrs_num
(
struct
sock
*
sk
,
int
len
,
static
int
sctp_getsockopt_peer_addrs_num
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
sctp_assoc_t
id
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_association
*
asoc
;
struct
list_head
*
pos
;
int
cnt
=
0
;
...
...
@@ -2135,7 +2328,7 @@ static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len,
static
int
sctp_getsockopt_peer_addrs
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
s
ctp_association_t
*
asoc
;
s
truct
sctp_association
*
asoc
;
struct
list_head
*
pos
;
int
cnt
=
0
;
struct
sctp_getaddrs
getaddrs
;
...
...
@@ -2176,8 +2369,8 @@ static int sctp_getsockopt_local_addrs_num(struct sock *sk, int len,
char
*
optval
,
int
*
optlen
)
{
sctp_assoc_t
id
;
s
ctp_bind_addr_t
*
bp
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_bind_addr
*
bp
;
s
truct
sctp_association
*
asoc
;
struct
list_head
*
pos
;
int
cnt
=
0
;
...
...
@@ -2214,8 +2407,8 @@ static int sctp_getsockopt_local_addrs_num(struct sock *sk, int len,
static
int
sctp_getsockopt_local_addrs
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
s
ctp_bind_addr_t
*
bp
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_bind_addr
*
bp
;
s
truct
sctp_association
*
asoc
;
struct
list_head
*
pos
;
int
cnt
=
0
;
struct
sctp_getaddrs
getaddrs
;
...
...
@@ -2317,7 +2510,7 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
int
len
,
char
*
optval
,
int
*
optlen
)
{
struct
sctp_sndrcvinfo
info
;
s
ctp_association_t
*
asoc
;
s
truct
sctp_association
*
asoc
;
if
(
len
!=
sizeof
(
struct
sctp_sndrcvinfo
))
return
-
EINVAL
;
...
...
@@ -2350,15 +2543,15 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
* integer boolean flag.
*/
static
int
sctp_getsockopt_nodelay
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
static
int
sctp_getsockopt_nodelay
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
__u8
val
;
int
val
;
if
(
len
<
sizeof
(
__u8
))
if
(
len
<
sizeof
(
int
))
return
-
EINVAL
;
len
=
sizeof
(
__u8
);
len
=
sizeof
(
int
);
val
=
(
sctp_sk
(
sk
)
->
nodelay
==
1
);
if
(
put_user
(
len
,
optlen
))
return
-
EFAULT
;
...
...
@@ -2366,6 +2559,62 @@ static int sctp_getsockopt_nodelay(struct sock *sk, int len,
return
-
EFAULT
;
return
0
;
}
/*
* 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
*
* This socket option is a boolean flag which turns on or off mapped V4
* addresses. If this option is turned on and the socket is type
* PF_INET6, then IPv4 addresses will be mapped to V6 representation.
* If this option is turned off, then no mapping will be done of V4
* addresses and a user will receive both PF_INET6 and PF_INET type
* addresses on the socket.
*/
static
int
sctp_getsockopt_mappedv4
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
int
val
;
if
(
len
<
sizeof
(
int
))
return
-
EINVAL
;
len
=
sizeof
(
int
);
/* FIXME: Until we have support, return disabled. */
val
=
0
;
if
(
put_user
(
len
,
optlen
))
return
-
EFAULT
;
if
(
copy_to_user
(
optval
,
&
val
,
len
))
return
-
EFAULT
;
return
0
;
}
/*
* 7.1.17 Set the maximum fragrmentation size (SCTP_MAXSEG)
*
* This socket option specifies the maximum size to put in any outgoing
* SCTP chunk. If a message is larger than this size it will be
* fragmented by SCTP into the specified size. Note that the underlying
* SCTP implementation may fragment into smaller sized chunks when the
* PMTU of the underlying association is smaller than the value set by
* the user.
*/
static
int
sctp_getsockopt_maxseg
(
struct
sock
*
sk
,
int
len
,
char
*
optval
,
int
*
optlen
)
{
int
val
;
if
(
len
<
sizeof
(
int
))
return
-
EINVAL
;
len
=
sizeof
(
int
);
val
=
sctp_sk
(
sk
)
->
user_frag
;
if
(
put_user
(
len
,
optlen
))
return
-
EFAULT
;
if
(
copy_to_user
(
optval
,
&
val
,
len
))
return
-
EFAULT
;
return
0
;
}
SCTP_STATIC
int
sctp_getsockopt
(
struct
sock
*
sk
,
int
level
,
int
optname
,
char
*
optval
,
int
*
optlen
)
...
...
@@ -2418,7 +2667,7 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
retval
=
sctp_getsockopt_initmsg
(
sk
,
len
,
optval
,
optlen
);
break
;
case
SCTP_GET_PEER_ADDRS_NUM
:
retval
=
sctp_getsockopt_peer_addrs_num
(
sk
,
len
,
optval
,
retval
=
sctp_getsockopt_peer_addrs_num
(
sk
,
len
,
optval
,
optlen
);
break
;
case
SCTP_GET_LOCAL_ADDRS_NUM
:
...
...
@@ -2443,6 +2692,12 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
case
SCTP_NODELAY
:
retval
=
sctp_getsockopt_nodelay
(
sk
,
len
,
optval
,
optlen
);
break
;
case
SCTP_I_WANT_MAPPED_V4_ADDR
:
retval
=
sctp_getsockopt_mappedv4
(
sk
,
len
,
optval
,
optlen
);
break
;
case
SCTP_MAXSEG
:
retval
=
sctp_getsockopt_maxseg
(
sk
,
len
,
optval
,
optlen
);
break
;
default:
retval
=
-
ENOPROTOOPT
;
break
;
...
...
@@ -2553,7 +2808,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
}
if
(
pp
!=
NULL
&&
pp
->
sk
!=
NULL
)
{
if
(
pp
&&
pp
->
sk
)
{
/* We had a port hash table hit - there is an
* available port (pp != NULL) and it is being
* used by other socket (pp->sk != NULL); that other
...
...
@@ -2578,7 +2833,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* in an endpoint.
*/
for
(
;
sk2
!=
NULL
;
sk2
=
sk2
->
bind_next
)
{
s
ctp_endpoint_
t
*
ep2
;
s
truct
sctp_endpoin
t
*
ep2
;
ep2
=
sctp_sk
(
sk2
)
->
ep
;
if
(
sk_reuse
&&
sk2
->
reuse
)
...
...
@@ -2601,18 +2856,17 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
/* If there was a hash table miss, create a new port. */
ret
=
1
;
if
(
pp
==
NULL
&&
(
pp
=
sctp_bucket_create
(
head
,
snum
))
==
NULL
)
if
(
!
pp
&&
!
(
pp
=
sctp_bucket_create
(
head
,
snum
))
)
goto
fail_unlock
;
/* In either case (hit or miss), make sure fastreuse is 1 only
* if sk->reuse is too (that is, if the caller requested
* SO_REUSEADDR on this socket -sk-).
*/
if
(
pp
->
sk
==
NULL
)
{
if
(
!
pp
->
sk
)
pp
->
fastreuse
=
sk
->
reuse
?
1
:
0
;
}
else
if
(
pp
->
fastreuse
&&
sk
->
reuse
==
0
)
{
else
if
(
pp
->
fastreuse
&&
sk
->
reuse
==
0
)
pp
->
fastreuse
=
0
;
}
/* We are set, so fill up all the data in the hash table
* entry, tie the socket list information with the rest of the
...
...
@@ -2669,15 +2923,15 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
SCTP_STATIC
int
sctp_seqpacket_listen
(
struct
sock
*
sk
,
int
backlog
)
{
struct
sctp_opt
*
sp
=
sctp_sk
(
sk
);
s
ctp_endpoint_
t
*
ep
=
sp
->
ep
;
s
truct
sctp_endpoin
t
*
ep
=
sp
->
ep
;
/* Only UDP style sockets that are not peeled off are allowed to
* listen().
*/
if
(
SCTP_SOCKET_UDP
!=
sp
->
type
)
if
(
!
sctp_style
(
sk
,
UDP
)
)
return
-
EINVAL
;
if
(
s
k
->
state
==
SCTP_SS_LISTENING
)
if
(
s
ctp_sstate
(
sk
,
LISTENING
)
)
return
0
;
/*
...
...
@@ -2702,15 +2956,15 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
/*
* 4.1.3 listen() - TCP Style Syntax
*
* Applications uses listen() to ready the SCTP endpoint for accepting
* Applications uses listen() to ready the SCTP endpoint for accepting
* inbound associations.
*/
SCTP_STATIC
int
sctp_stream_listen
(
struct
sock
*
sk
,
int
backlog
)
{
struct
sctp_opt
*
sp
=
sctp_sk
(
sk
);
s
ctp_endpoint_
t
*
ep
=
sp
->
ep
;
s
truct
sctp_endpoin
t
*
ep
=
sp
->
ep
;
if
(
s
k
->
state
==
SCTP_SS_LISTENING
)
if
(
s
ctp_sstate
(
sk
,
LISTENING
)
)
return
0
;
/*
...
...
@@ -2739,15 +2993,25 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
int
sctp_inet_listen
(
struct
socket
*
sock
,
int
backlog
)
{
struct
sock
*
sk
=
sock
->
sk
;
int
err
;
struct
crypto_tfm
*
tfm
=
NULL
;
int
err
=
-
EINVAL
;
if
(
unlikely
(
backlog
<
0
))
goto
out
;
sctp_lock_sock
(
sk
);
err
=
-
EINVAL
;
if
(
sock
->
state
!=
SS_UNCONNECTED
)
goto
out
;
if
(
unlikely
(
backlog
<
0
))
goto
out
;
/* Allocate HMAC for generating cookie. */
if
(
sctp_hmac_alg
)
{
tfm
=
sctp_crypto_alloc_tfm
(
sctp_hmac_alg
,
0
);
if
(
!
tfm
)
{
err
=
-
ENOSYS
;
goto
out
;
}
}
switch
(
sock
->
type
)
{
case
SOCK_SEQPACKET
:
...
...
@@ -2756,14 +3020,21 @@ int sctp_inet_listen(struct socket *sock, int backlog)
case
SOCK_STREAM
:
err
=
sctp_stream_listen
(
sk
,
backlog
);
break
;
default:
goto
out
;
break
;
};
if
(
err
)
goto
cleanup
;
/* Store away the transform reference. */
sctp_sk
(
sk
)
->
hmac
=
tfm
;
out:
sctp_release_sock
(
sk
);
return
err
;
cleanup:
if
(
tfm
)
sctp_crypto_free_tfm
(
tfm
);
goto
out
;
}
/*
...
...
@@ -2782,9 +3053,18 @@ int sctp_inet_listen(struct socket *sock, int backlog)
unsigned
int
sctp_poll
(
struct
file
*
file
,
struct
socket
*
sock
,
poll_table
*
wait
)
{
struct
sock
*
sk
=
sock
->
sk
;
struct
sctp_opt
*
sp
=
sctp_sk
(
sk
);
unsigned
int
mask
;
poll_wait
(
file
,
sk
->
sleep
,
wait
);
/* A TCP-style listening socket becomes readable when the accept queue
* is not empty.
*/
if
(
sctp_style
(
sk
,
TCP
)
&&
sctp_sstate
(
sk
,
LISTENING
))
return
(
!
list_empty
(
&
sp
->
ep
->
asocs
))
?
(
POLLIN
|
POLLRDNORM
)
:
0
;
mask
=
0
;
/* Is there any exceptional events? */
...
...
@@ -2798,19 +3078,9 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
(
sk
->
shutdown
&
RCV_SHUTDOWN
))
mask
|=
POLLIN
|
POLLRDNORM
;
/*
* FIXME: We need to set SCTP_SS_DISCONNECTING for TCP-style and
* peeled off sockets. Additionally, TCP-style needs to consider
* other establishment conditions.
*/
if
(
SCTP_SOCKET_UDP
!=
sctp_sk
(
sk
)
->
type
)
{
/* The association is going away. */
if
(
SCTP_SS_DISCONNECTING
==
sk
->
state
)
mask
|=
POLLHUP
;
/* The association is either gone or not ready. */
if
(
SCTP_SS_CLOSED
==
sk
->
state
)
return
mask
;
}
/* The association is either gone or not ready. */
if
(
!
sctp_style
(
sk
,
UDP
)
&&
sctp_sstate
(
sk
,
CLOSED
))
return
mask
;
/* Is it writable? */
if
(
sctp_writeable
(
sk
))
{
...
...
@@ -2967,7 +3237,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
/* Strictly check lengths following example in SCM code. */
switch
(
cmsg
->
cmsg_type
)
{
case
SCTP_INIT
:
/* SCTP Socket API Extension
(draft 1)
/* SCTP Socket API Extension
* 5.2.1 SCTP Initiation Structure (SCTP_INIT)
*
* This cmsghdr structure provides information for
...
...
@@ -2987,7 +3257,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
break
;
case
SCTP_SNDRCV
:
/* SCTP Socket API Extension
(draft 1)
/* SCTP Socket API Extension
* 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV)
*
* This cmsghdr structure specifies SCTP options for
...
...
@@ -3002,7 +3272,8 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
CMSG_LEN
(
sizeof
(
struct
sctp_sndrcvinfo
)))
return
-
EINVAL
;
cmsgs
->
info
=
(
struct
sctp_sndrcvinfo
*
)
CMSG_DATA
(
cmsg
);
cmsgs
->
info
=
(
struct
sctp_sndrcvinfo
*
)
CMSG_DATA
(
cmsg
);
/* Minimally, validate the sinfo_flags. */
if
(
cmsgs
->
info
->
sinfo_flags
&
...
...
@@ -3026,10 +3297,9 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
static
int
sctp_wait_for_packet
(
struct
sock
*
sk
,
int
*
err
,
long
*
timeo_p
)
{
int
error
;
DE
CLARE_WAITQUEUE
(
wait
,
curren
t
);
DE
FINE_WAIT
(
wai
t
);
__set_current_state
(
TASK_INTERRUPTIBLE
);
add_wait_queue_exclusive
(
sk
->
sleep
,
&
wait
);
prepare_to_wait_exclusive
(
sk
->
sleep
,
&
wait
,
TASK_INTERRUPTIBLE
);
/* Socket errors? */
error
=
sock_error
(
sk
);
...
...
@@ -3049,8 +3319,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
error
=
-
ENOTCONN
;
/* Is there a good reason to think that we may receive some data? */
if
((
list_empty
(
&
sctp_sk
(
sk
)
->
ep
->
asocs
))
&&
(
sk
->
state
!=
SCTP_SS_LISTENING
))
if
(
list_empty
(
&
sctp_sk
(
sk
)
->
ep
->
asocs
)
&&
!
sctp_sstate
(
sk
,
LISTENING
))
goto
out
;
/* Handle signals. */
...
...
@@ -3067,16 +3336,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
sctp_lock_sock
(
sk
);
ready:
remove_wait_queue
(
sk
->
sleep
,
&
wait
);
__set_current_state
(
TASK_RUNNING
);
finish_wait
(
sk
->
sleep
,
&
wait
);
return
0
;
interrupted:
error
=
sock_intr_errno
(
*
timeo_p
);
out:
remove_wait_queue
(
sk
->
sleep
,
&
wait
);
__set_current_state
(
TASK_RUNNING
);
finish_wait
(
sk
->
sleep
,
&
wait
);
*
err
=
error
;
return
error
;
}
...
...
@@ -3085,13 +3352,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
* Note: This is pretty much the same routine as in core/datagram.c
* with a few changes to make lksctp work.
*/
static
struct
sk_buff
*
sctp_skb_recv_datagram
(
struct
sock
*
sk
,
int
flags
,
int
noblock
,
int
*
err
)
static
struct
sk_buff
*
sctp_skb_recv_datagram
(
struct
sock
*
sk
,
int
flags
,
int
noblock
,
int
*
err
)
{
int
error
;
struct
sk_buff
*
skb
;
long
timeo
;
/* Caller is allowed not to check sk->err before
skb_recv_datagram()
*/
/* Caller is allowed not to check sk->err before
calling.
*/
error
=
sock_error
(
sk
);
if
(
error
)
goto
no_packet
;
...
...
@@ -3126,6 +3394,9 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int no
if
(
skb
)
return
skb
;
if
(
sk
->
shutdown
&
RCV_SHUTDOWN
)
break
;
/* User doesn't want to wait. */
error
=
-
EAGAIN
;
if
(
!
timeo
)
...
...
@@ -3140,7 +3411,7 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int no
}
/* Verify that this is a valid address. */
static
inline
int
sctp_verify_addr
(
struct
sock
*
sk
,
union
sctp_addr
*
addr
,
static
inline
int
sctp_verify_addr
(
struct
sock
*
sk
,
union
sctp_addr
*
addr
,
int
len
)
{
struct
sctp_af
*
af
;
...
...
@@ -3161,7 +3432,7 @@ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
}
/* Get the sndbuf space available at the time on the association. */
static
inline
int
sctp_wspace
(
s
ctp_association_t
*
asoc
)
static
inline
int
sctp_wspace
(
s
truct
sctp_association
*
asoc
)
{
struct
sock
*
sk
=
asoc
->
base
.
sk
;
int
amt
=
0
;
...
...
@@ -3181,9 +3452,9 @@ static inline int sctp_wspace(sctp_association_t *asoc)
* destructor in the data chunk skb for the purpose of the sndbuf space
* tracking.
*/
static
inline
void
sctp_set_owner_w
(
s
ctp_chunk_t
*
chunk
)
static
inline
void
sctp_set_owner_w
(
s
truct
sctp_chunk
*
chunk
)
{
s
ctp_association_t
*
asoc
=
chunk
->
asoc
;
s
truct
sctp_association
*
asoc
=
chunk
->
asoc
;
struct
sock
*
sk
=
asoc
->
base
.
sk
;
/* The sndbuf space is tracked per association. */
...
...
@@ -3191,14 +3462,14 @@ static inline void sctp_set_owner_w(sctp_chunk_t *chunk)
chunk
->
skb
->
destructor
=
sctp_wfree
;
/* Save the chunk pointer in skb for sctp_wfree to use later. */
*
((
s
ctp_chunk_t
**
)(
chunk
->
skb
->
cb
))
=
chunk
;
*
((
s
truct
sctp_chunk
**
)(
chunk
->
skb
->
cb
))
=
chunk
;
asoc
->
sndbuf_used
+=
SCTP_DATA_SNDSIZE
(
chunk
);
sk
->
wmem_queued
+=
SCTP_DATA_SNDSIZE
(
chunk
);
}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
static
void
__sctp_write_space
(
s
ctp_association_t
*
asoc
)
static
void
__sctp_write_space
(
s
truct
sctp_association
*
asoc
)
{
struct
sock
*
sk
=
asoc
->
base
.
sk
;
struct
socket
*
sock
=
sk
->
socket
;
...
...
@@ -3228,12 +3499,12 @@ static void __sctp_write_space(sctp_association_t *asoc)
*/
static
void
sctp_wfree
(
struct
sk_buff
*
skb
)
{
s
ctp_association_t
*
asoc
;
s
ctp_chunk_t
*
chunk
;
s
truct
sctp_association
*
asoc
;
s
truct
sctp_chunk
*
chunk
;
struct
sock
*
sk
;
/* Get the saved chunk pointer. */
chunk
=
*
((
s
ctp_chunk_t
**
)(
skb
->
cb
));
chunk
=
*
((
s
truct
sctp_chunk
**
)(
skb
->
cb
));
asoc
=
chunk
->
asoc
;
sk
=
asoc
->
base
.
sk
;
asoc
->
sndbuf_used
-=
SCTP_DATA_SNDSIZE
(
chunk
);
...
...
@@ -3244,24 +3515,24 @@ static void sctp_wfree(struct sk_buff *skb)
}
/* Helper function to wait for space in the sndbuf. */
static
int
sctp_wait_for_sndbuf
(
s
ctp_association_t
*
asoc
,
long
*
timeo_p
,
static
int
sctp_wait_for_sndbuf
(
s
truct
sctp_association
*
asoc
,
long
*
timeo_p
,
int
msg_len
)
{
struct
sock
*
sk
=
asoc
->
base
.
sk
;
int
err
=
0
;
long
current_timeo
=
*
timeo_p
;
DE
CLARE_WAITQUEUE
(
wait
,
curren
t
);
DE
FINE_WAIT
(
wai
t
);
SCTP_DEBUG_PRINTK
(
"wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%d
\n
"
,
asoc
,
(
long
)(
*
timeo_p
),
msg_len
);
/* Wait on the association specific sndbuf space. */
add_wait_queue_exclusive
(
&
asoc
->
wait
,
&
wait
);
/* Increment the association's refcnt. */
sctp_association_hold
(
asoc
);
/* Wait on the association specific sndbuf space. */
for
(;;)
{
set_current_state
(
TASK_INTERRUPTIBLE
);
prepare_to_wait_exclusive
(
&
asoc
->
wait
,
&
wait
,
TASK_INTERRUPTIBLE
);
if
(
!*
timeo_p
)
goto
do_nonblock
;
if
(
sk
->
err
||
asoc
->
state
>=
SCTP_STATE_SHUTDOWN_PENDING
||
...
...
@@ -3283,12 +3554,11 @@ static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p,
}
out:
remove_wait_queue
(
&
asoc
->
wait
,
&
wait
);
finish_wait
(
&
asoc
->
wait
,
&
wait
);
/* Release the association's refcnt. */
sctp_association_put
(
asoc
);
__set_current_state
(
TASK_RUNNING
);
return
err
;
do_error:
...
...
@@ -3307,12 +3577,12 @@ static int sctp_wait_for_sndbuf(sctp_association_t *asoc, long *timeo_p,
/* If socket sndbuf has changed, wake up all per association waiters. */
void
sctp_write_space
(
struct
sock
*
sk
)
{
s
ctp_association_t
*
asoc
;
s
truct
sctp_association
*
asoc
;
struct
list_head
*
pos
;
/* Wake up the tasks in each wait queue. */
list_for_each
(
pos
,
&
((
sctp_sk
(
sk
))
->
ep
->
asocs
))
{
asoc
=
list_entry
(
pos
,
s
ctp_association_t
,
asocs
);
asoc
=
list_entry
(
pos
,
s
truct
sctp_association
,
asocs
);
__sctp_write_space
(
asoc
);
}
}
...
...
@@ -3341,32 +3611,33 @@ static int sctp_writeable(struct sock *sk)
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
* returns immediately with EINPROGRESS.
*/
static
int
sctp_wait_for_connect
(
s
ctp_association_t
*
asoc
,
long
*
timeo_p
)
static
int
sctp_wait_for_connect
(
s
truct
sctp_association
*
asoc
,
long
*
timeo_p
)
{
struct
sock
*
sk
=
asoc
->
base
.
sk
;
int
err
=
0
;
long
current_timeo
=
*
timeo_p
;
DE
CLARE_WAITQUEUE
(
wait
,
curren
t
);
DE
FINE_WAIT
(
wai
t
);
SCTP_DEBUG_PRINTK
(
"%s: asoc=%p, timeo=%ld
\n
"
,
__FUNCTION__
,
asoc
,
(
long
)(
*
timeo_p
));
add_wait_queue_exclusive
(
&
asoc
->
wait
,
&
wait
);
/* Increment the association's refcnt. */
sctp_association_hold
(
asoc
);
for
(;;)
{
__set_current_state
(
TASK_INTERRUPTIBLE
);
prepare_to_wait_exclusive
(
&
asoc
->
wait
,
&
wait
,
TASK_INTERRUPTIBLE
);
if
(
!*
timeo_p
)
goto
do_nonblock
;
if
(
sk
->
shutdown
&
RCV_SHUTDOWN
)
break
;
if
(
sk
->
err
||
asoc
->
state
>=
SCTP_STATE_SHUTDOWN_PENDING
||
asoc
->
base
.
dead
)
goto
do_error
;
if
(
signal_pending
(
current
))
goto
do_interrupted
;
if
(
asoc
->
state
==
SCTP_STATE_ESTABLISHED
)
if
(
sctp_state
(
asoc
,
ESTABLISHED
)
)
break
;
/* Let another process have a go. Since we are going
...
...
@@ -3380,13 +3651,11 @@ static int sctp_wait_for_connect(sctp_association_t *asoc, long *timeo_p)
}
out:
remove_wait_queue
(
&
asoc
->
wait
,
&
wait
);
finish_wait
(
&
asoc
->
wait
,
&
wait
);
/* Release the association's refcnt. */
sctp_association_put
(
asoc
);
__set_current_state
(
TASK_RUNNING
);
return
err
;
do_error:
...
...
@@ -3406,14 +3675,14 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
struct
sctp_endpoint
*
ep
;
int
err
=
0
;
DE
CLARE_WAITQUEUE
(
wait
,
curren
t
);
DE
FINE_WAIT
(
wai
t
);
ep
=
sctp_sk
(
sk
)
->
ep
;
add_wait_queue_exclusive
(
sk
->
sleep
,
&
wait
);
for
(;;)
{
__set_current_state
(
TASK_INTERRUPTIBLE
);
prepare_to_wait_exclusive
(
sk
->
sleep
,
&
wait
,
TASK_INTERRUPTIBLE
);
if
(
list_empty
(
&
ep
->
asocs
))
{
sctp_release_sock
(
sk
);
timeo
=
schedule_timeout
(
timeo
);
...
...
@@ -3421,7 +3690,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
}
err
=
-
EINVAL
;
if
(
sk
->
state
!=
SCTP_SS_LISTENING
)
if
(
!
sctp_sstate
(
sk
,
LISTENING
)
)
break
;
err
=
0
;
...
...
@@ -3437,21 +3706,37 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
break
;
}
remove_wait_queue
(
sk
->
sleep
,
&
wait
);
__set_current_state
(
TASK_RUNNING
);
finish_wait
(
sk
->
sleep
,
&
wait
);
return
err
;
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
void
sctp_wait_for_close
(
struct
sock
*
sk
,
long
timeout
)
{
DEFINE_WAIT
(
wait
);
do
{
prepare_to_wait
(
sk
->
sleep
,
&
wait
,
TASK_INTERRUPTIBLE
);
if
(
list_empty
(
&
sctp_sk
(
sk
)
->
ep
->
asocs
))
break
;
sctp_release_sock
(
sk
);
timeout
=
schedule_timeout
(
timeout
);
sctp_lock_sock
(
sk
);
}
while
(
!
signal_pending
(
current
)
&&
timeout
);
finish_wait
(
sk
->
sleep
,
&
wait
);
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
* and its messages to the newsk.
*/
void
sctp_sock_migrate
(
struct
sock
*
oldsk
,
struct
sock
*
newsk
,
struct
sctp_association
*
assoc
,
sctp_socket_type_t
type
)
*/
static
void
sctp_sock_migrate
(
struct
sock
*
oldsk
,
struct
sock
*
newsk
,
struct
sctp_association
*
assoc
,
sctp_socket_type_t
type
)
{
struct
sctp_opt
*
oldsp
=
sctp_sk
(
oldsk
);
struct
sctp_opt
*
newsp
=
sctp_sk
(
newsk
);
s
ctp_endpoint_
t
*
newep
=
newsp
->
ep
;
s
truct
sctp_endpoin
t
*
newep
=
newsp
->
ep
;
struct
sk_buff
*
skb
,
*
tmp
;
struct
sctp_ulpevent
*
event
;
...
...
@@ -3466,6 +3751,7 @@ void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* copy.
*/
newsp
->
ep
=
newep
;
newsp
->
hmac
=
NULL
;
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
...
...
@@ -3524,9 +3810,15 @@ void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
/* Migrate the association to the new socket. */
sctp_assoc_migrate
(
assoc
,
newsk
);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
*/
if
(
sctp_state
(
assoc
,
CLOSED
)
&&
sctp_style
(
newsk
,
TCP
))
newsk
->
shutdown
|=
RCV_SHUTDOWN
;
newsk
->
state
=
SCTP_SS_ESTABLISHED
;
}
/* This proto struct describes the ULP interface for SCTP. */
struct
proto
sctp_prot
=
{
.
name
=
"SCTP"
,
...
...
net/sctp/ssnmap.c
View file @
3e446c25
...
...
@@ -53,11 +53,11 @@ static inline size_t sctp_ssnmap_size(__u16 in, __u16 out)
/* Create a new sctp_ssnmap.
* Allocate room to store at least 'len' contiguous TSNs.
*/
struct
sctp_ssnmap
*
sctp_ssnmap_new
(
__u16
in
,
__u16
out
,
int
priority
)
struct
sctp_ssnmap
*
sctp_ssnmap_new
(
__u16
in
,
__u16
out
,
int
gfp
)
{
struct
sctp_ssnmap
*
retval
;
retval
=
kmalloc
(
sctp_ssnmap_size
(
in
,
out
),
priority
);
retval
=
kmalloc
(
sctp_ssnmap_size
(
in
,
out
),
gfp
);
if
(
!
retval
)
goto
fail
;
...
...
net/sctp/transport.c
View file @
3e446c25
...
...
@@ -54,16 +54,15 @@
/* 1st Level Abstractions. */
/* Allocate and initialize a new transport. */
struct
sctp_transport
*
sctp_transport_new
(
const
union
sctp_addr
*
addr
,
int
priority
)
struct
sctp_transport
*
sctp_transport_new
(
const
union
sctp_addr
*
addr
,
int
gfp
)
{
struct
sctp_transport
*
transport
;
transport
=
t_new
(
struct
sctp_transport
,
priority
);
transport
=
t_new
(
struct
sctp_transport
,
gfp
);
if
(
!
transport
)
goto
fail
;
if
(
!
sctp_transport_init
(
transport
,
addr
,
priority
))
if
(
!
sctp_transport_init
(
transport
,
addr
,
gfp
))
goto
fail_init
;
transport
->
malloced
=
1
;
...
...
@@ -81,7 +80,7 @@ struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
/* Initialize a new transport from provided memory. */
struct
sctp_transport
*
sctp_transport_init
(
struct
sctp_transport
*
peer
,
const
union
sctp_addr
*
addr
,
int
priority
)
int
gfp
)
{
struct
sctp_protocol
*
proto
=
sctp_get_protocol
();
...
...
@@ -117,8 +116,6 @@ struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer
->
error_threshold
=
0
;
peer
->
error_count
=
0
;
peer
->
debug_name
=
"unnamedtransport"
;
INIT_LIST_HEAD
(
&
peer
->
transmitted
);
INIT_LIST_HEAD
(
&
peer
->
send_ready
);
INIT_LIST_HEAD
(
&
peer
->
transports
);
...
...
@@ -138,6 +135,13 @@ struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer
->
dead
=
0
;
peer
->
malloced
=
0
;
/* Initialize the state information for SFR-CACC */
peer
->
cacc
.
changeover_active
=
0
;
peer
->
cacc
.
cycling_changeover
=
0
;
peer
->
cacc
.
next_tsn_at_change
=
0
;
peer
->
cacc
.
cacc_saw_newack
=
0
;
return
peer
;
}
...
...
@@ -199,13 +203,13 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
* Register the reference count in the association.
*/
void
sctp_transport_set_owner
(
struct
sctp_transport
*
transport
,
s
ctp_association_t
*
asoc
)
s
truct
sctp_association
*
asoc
)
{
transport
->
asoc
=
asoc
;
sctp_association_hold
(
asoc
);
}
/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport)
{
	struct dst_entry *dst;
...
...
@@ -225,7 +229,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport)
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_opt *opt)
{
-	sctp_association_t *asoc = transport->asoc;
+	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;
	union sctp_addr *daddr = &transport->ipaddr;
	struct dst_entry *dst;
...
...
@@ -238,9 +242,15 @@ void sctp_transport_route(struct sctp_transport *transport,
	af->get_saddr(asoc, dst, daddr, &transport->saddr);

	transport->dst = dst;
-	if (dst)
+	if (dst) {
		transport->pmtu = dst_pmtu(dst);
-	else
+
+		/* Initialize sk->rcv_saddr, if the transport is the
+		 * association's active path for getsockname().
+		 */
+		if (asoc && (transport == asoc->peer.active_path))
+			af->to_sk_saddr(&transport->saddr, asoc->base.sk);
+	} else
		transport->pmtu = SCTP_DEFAULT_MAXSEGMENT;
}
...
...
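The sctp_transport_route() change above primes sk->rcv_saddr from the active path so that getsockname() reports the local address actually chosen for that path. A user-space illustration of what that buys (not part of the commit), assuming sd is a connected SCTP socket over IPv4:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void show_local_addr(int sd)
{
	struct sockaddr_in sin;
	socklen_t len = sizeof(sin);
	char buf[INET_ADDRSTRLEN];

	memset(&sin, 0, sizeof(sin));
	if (getsockname(sd, (struct sockaddr *)&sin, &len) == 0)
		printf("local address: %s:%u\n",
		       inet_ntop(AF_INET, &sin.sin_addr, buf, sizeof(buf)),
		       ntohs(sin.sin_port));
}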
@@ -359,7 +369,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
* two conditions are met can the cwnd be increased otherwise
* the cwnd MUST not be increased. If these conditions are met
* then cwnd MUST be increased by at most the lesser of
	 * 1) the total size of the previously outstanding DATA
* chunk(s) acknowledged, and 2) the destination's path MTU.
*/
	if (bytes_acked > pmtu)
...
...
@@ -373,17 +383,17 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			  transport, bytes_acked, cwnd,
			  ssthresh, flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
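The two comment blocks above describe the RFC 2960 7.2.2 window-growth rules that sctp_transport_raise_cwnd() implements. A simplified, standalone sketch of those rules, called when a SACK advances the Cumulative TSN Ack Point; names mirror the comments, not the kernel structures:

#include <stdint.h>

struct cwnd_state {
	uint32_t cwnd;
	uint32_t ssthresh;
	uint32_t pba;		/* partial_bytes_acked */
	uint32_t pmtu;
};

static void sack_advanced_ctsn(struct cwnd_state *s, uint32_t bytes_acked,
			       uint32_t flight_size)
{
	if (s->cwnd <= s->ssthresh) {
		/* Slow start: grow only while cwnd was fully utilized, by at
		 * most the lesser of the bytes acknowledged and one path MTU.
		 */
		if (flight_size >= s->cwnd)
			s->cwnd += (bytes_acked > s->pmtu) ? s->pmtu : bytes_acked;
	} else {
		/* Congestion avoidance: accumulate acknowledged bytes and add
		 * one MTU per cwnd's worth of acknowledged data, provided the
		 * window was full when the SACK arrived.
		 */
		s->pba += bytes_acked;
		if (s->pba >= s->cwnd && flight_size >= s->cwnd) {
			s->pba -= s->cwnd;
			s->cwnd += s->pmtu;
		}
	}
}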
...
...
net/sctp/tsnmap.c View file @ 3e446c25
...
...
@@ -55,13 +55,12 @@ static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
/* Create a new sctp_tsnmap.
* Allocate room to store at least 'len' contiguous TSNs.
*/
-struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int priority)
+struct sctp_tsnmap *sctp_tsnmap_new(__u16 len, __u32 initial_tsn, int gfp)
{
	struct sctp_tsnmap *retval;

	retval = kmalloc(sizeof(struct sctp_tsnmap) +
-			 sctp_tsnmap_storage_size(len), priority);
+			 sctp_tsnmap_storage_size(len), gfp);
	if (!retval)
		goto fail;
...
...
net/sctp/ulpevent.c View file @ 3e446c25
...
...
@@ -52,12 +52,12 @@ static void sctp_ulpevent_set_owner(struct sk_buff *skb,
					    const struct sctp_association *asoc);

/* Create a new sctp_ulpevent. */
-struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int priority)
+struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, int gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

-	skb = alloc_skb(size, priority);
+	skb = alloc_skb(size, gfp);
	if (!skb)
		goto fail;
...
...
@@ -106,16 +106,16 @@ int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event)
* zero'd out.
*/
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
-	const sctp_association_t *asoc,
+	const struct sctp_association *asoc,
	__u16 flags, __u16 state, __u16 error, __u16 outbound,
-	__u16 inbound, int priority)
+	__u16 inbound, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_assoc_change *sac;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
-				  MSG_NOTIFICATION, priority);
+				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;
	skb = sctp_event2skb(event);
...
...
@@ -207,15 +207,16 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
* an interface details event is sent.
*/
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
-	const sctp_association_t *asoc,
-	const struct sockaddr_storage *aaddr,
-	int flags, int state, int error, int priority)
+	const struct sctp_association *asoc,
+	const struct sockaddr_storage *aaddr,
+	int flags, int state, int error, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_paddr_change *spc;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change),
-				  MSG_NOTIFICATION, priority);
+				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;
...
...
@@ -315,8 +316,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
* error formats.
*/
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
-	const sctp_association_t *asoc, sctp_chunk_t *chunk,
-	__u16 flags, int priority)
+	const struct sctp_association *asoc, struct sctp_chunk *chunk,
+	__u16 flags, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_remote_error *sre;
...
...
@@ -327,7 +328,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
	ch = (sctp_errhdr_t *)(chunk->skb->data);
	cause = ch->cause;
-	elen = ntohs(ch->length) - sizeof(sctp_errhdr_t);
+	elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t);

	/* Pull off the ERROR header. */
	skb_pull(chunk->skb, sizeof(sctp_errhdr_t));
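The switch to WORD_ROUND() above matters because SCTP error-cause TLVs are padded to a 4-byte boundary: the length field excludes the padding, but the bytes to be pulled off the skb include it. WORD_ROUND is, in this tree, essentially the rounding macro sketched below (shown for illustration only, under that assumption):

/* Round a TLV length up to the next multiple of four, per SCTP padding
 * rules.  E.g. EXAMPLE_WORD_ROUND(13) == 16, so a 13-byte cause occupies
 * 16 bytes on the wire and elen now accounts for that padding.
 */
#define EXAMPLE_WORD_ROUND(s)	(((s) + 3) & ~3)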
...
...
@@ -335,10 +336,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
/* Copy the skb to a new skb with room for us to prepend
* notification with.
*/
-	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
-			      /* headroom */ 0,
-			      /* tailroom */ priority);
+	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
+			      0, gfp);

	/* Pull off the rest of the cause TLV from the chunk. */
	skb_pull(chunk->skb, elen);
...
...
@@ -419,23 +418,27 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
* 5.3.1.4 SCTP_SEND_FAILED
*/
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
-	const sctp_association_t *asoc, sctp_chunk_t *chunk,
-	__u16 flags, __u32 error, int priority)
+	const struct sctp_association *asoc, struct sctp_chunk *chunk,
+	__u16 flags, __u32 error, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_send_failed *ssf;
	struct sk_buff *skb;

	/* Pull off any padding. */
	int len = ntohs(chunk->chunk_hdr->length);

	/* Make skb with more room so we can prepend notification. */
	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_send_failed),
			      /* headroom */ 0,
-			      /* tailroom */ priority);
+			      gfp);
	if (!skb)
		goto fail;

	/* Pull off the common chunk header and DATA header. */
-	skb_pull(skb, sizeof(sctp_data_chunk_t));
+	skb_pull(skb, sizeof(struct sctp_data_chunk));
+	len -= sizeof(struct sctp_data_chunk);

	/* Embed the event fields inside the cloned skb. */
	event = sctp_skb2event(skb);
...
...
@@ -476,7 +479,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
* This field is the total length of the notification data, including
* the notification header.
*/
-	ssf->ssf_length = skb->len;
+	ssf->ssf_length = sizeof(struct sctp_send_failed) + len;
+	skb_trim(skb, ssf->ssf_length);
/* Socket Extensions for SCTP
* 5.3.1.4 SCTP_SEND_FAILED
...
...
@@ -497,6 +501,11 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
*/
	memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));

	/* Per TSVWG discussion with Randy. Allow the application to
	 * reassemble a fragmented message.
	 */
	ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;
/* Socket Extensions for SCTP
* 5.3.1.4 SCTP_SEND_FAILED
*
...
...
@@ -521,15 +530,15 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
* 5.3.1.5 SCTP_SHUTDOWN_EVENT
*/
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
-	const sctp_association_t *asoc,
-	__u16 flags, int priority)
+	const struct sctp_association *asoc,
+	__u16 flags, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_shutdown_event *sse;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
-				  MSG_NOTIFICATION, priority);
+				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;
...
...
@@ -586,8 +595,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
* Socket Extensions for SCTP
* 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
*/
-struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
-						 sctp_chunk_t *chunk,
-						 int priority)
+struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+						 struct sctp_chunk *chunk,
+						 int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_sndrcvinfo *info;
...
...
@@ -595,7 +605,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
	size_t padding, len;

	/* Clone the original skb, sharing the data. */
-	skb = skb_clone(chunk->skb, priority);
+	skb = skb_clone(chunk->skb, gfp);
	if (!skb)
		goto fail;
...
...
@@ -631,7 +641,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
	event->iif = sctp_chunk_iif(chunk);

	/* Note: Not clearing the entire event struct as
	 * this is just a fragment of the real event. However,
	 * we still need to do rwnd accounting.
	 */
	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		sctp_ulpevent_set_owner_r(list, asoc);
...
...
@@ -690,16 +700,16 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
		info->sinfo_flags |= MSG_UNORDERED;

		/* sinfo_cumtsn: 32 bit (unsigned integer)
		 *
		 * This field will hold the current cumulative TSN as
		 * known by the underlying SCTP layer. Note this field is
		 * ignored when sending and only valid for a receive
		 * operation when sinfo_flags are set to MSG_UNORDERED.
		 */
		info->sinfo_cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	}

	/* Note: For reassembly, we need to have the fragmentation bits.
	 * For now, merge these into the msg_flags, since those bit
	 * positions are not used.
	 */
...
...
@@ -732,7 +742,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
	return NULL;
}

/* Create a partial delivery related event.
*
* 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT
*
...
...
@@ -741,14 +751,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
* various events.
*/
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
-	const sctp_association_t *asoc, __u32 indication, int priority)
+	const struct sctp_association *asoc, __u32 indication, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_rcv_pdapi_event *pd;
	struct sk_buff *skb;

	event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
-				  MSG_NOTIFICATION, priority);
+				  MSG_NOTIFICATION, gfp);
	if (!event)
		goto fail;
...
...
@@ -780,7 +790,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
	pd->pdapi_indication = indication;

	/* pdapi_assoc_id: sizeof (sctp_assoc_t)
	 *
	 * The association id field, holds the identifier for the association.
	 */
	pd->pdapi_assoc_id = sctp_assoc2id(asoc);
...
...
@@ -817,7 +827,7 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
/* Do accounting for bytes just read by user. */
static void sctp_rcvmsg_rfree(struct sk_buff *skb)
{
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
	struct sctp_ulpevent *event;
/* Current stack structures assume that the rcv buffer is
...
...
@@ -834,7 +844,7 @@ static void sctp_rcvmsg_rfree(struct sk_buff *skb)
/* Charge receive window for bytes received. */
static void sctp_ulpevent_set_owner_r(struct sk_buff *skb,
-				      sctp_association_t *asoc)
+				      struct sctp_association *asoc)
{
	struct sctp_ulpevent *event;
...
...
net/sctp/ulpqueue.c View file @ 3e446c25
...
...
@@ -57,11 +57,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
/* 1st Level Abstractions */
/* Create a new ULP queue. */
-struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority)
+struct sctp_ulpq *sctp_ulpq_new(struct sctp_association *asoc, int gfp)
{
	struct sctp_ulpq *ulpq;

-	ulpq = kmalloc(sizeof(struct sctp_ulpq), priority);
+	ulpq = kmalloc(sizeof(struct sctp_ulpq), gfp);
	if (!ulpq)
		goto fail;
	if (!sctp_ulpq_init(ulpq, asoc))
...
...
@@ -77,7 +77,7 @@ struct sctp_ulpq *sctp_ulpq_new(sctp_association_t *asoc, int priority)
/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
-				 sctp_association_t *asoc)
+				 struct sctp_association *asoc)
{
	memset(ulpq, sizeof(struct sctp_ulpq), 0x00);
...
...
@@ -118,8 +118,8 @@ void sctp_ulpq_free(struct sctp_ulpq *ulpq)
}
/* Process an incoming DATA chunk. */
-int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
-			int priority)
+int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+			int gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
...
...
@@ -128,7 +128,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, sctp_chunk_t *chunk,
	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
-	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, priority);
+	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;
...
...
@@ -253,6 +253,21 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
	tsn = event->sndrcvinfo.sinfo_tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->sndrcvinfo.sinfo_tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
...
...
@@ -262,12 +277,9 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
			break;
	}

-	/* If the queue is empty, we have a different function to call. */
-	if (skb_peek(&ulpq->reasm))
-		__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
-	else
-		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+	/* Insert before pos. */
+	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}
/* Helper function to return an event corresponding to the reassembled
...
...
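Both queues touched in this file (the TSN-ordered reassembly queue above and the SID/SSN-ordered lobby below) now follow the same pattern: try the cheap append-at-tail fast paths first, and only walk the queue when the new event really belongs somewhere in the middle. A self-contained sketch of the ordering itself, using serial-number comparison so wrapped sequence numbers still sort correctly; EXAMPLE_TSN_lt only mirrors the kernel's TSN_lt, and the list type is hypothetical:

#include <stddef.h>
#include <stdint.h>

/* Serial-number arithmetic: a < b even across a 32-bit wrap. */
#define EXAMPLE_TSN_lt(a, b) \
	((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

struct example_ev {
	struct example_ev *next;
	uint32_t tsn;
};

/* Insert ev before the first queued element with a strictly larger TSN. */
static void example_insert_by_tsn(struct example_ev **head,
				  struct example_ev *ev)
{
	struct example_ev **pp = head;

	while (*pp && !EXAMPLE_TSN_lt(ev->tsn, (*pp)->tsn))
		pp = &(*pp)->next;
	ev->next = *pp;
	*pp = ev;
}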
@@ -592,8 +604,27 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->sndrcvinfo.sinfo_stream;
	ssn = event->sndrcvinfo.sinfo_ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->sndrcvinfo.sinfo_stream;
	cssn = cevent->sndrcvinfo.sinfo_ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}
/* Find the right place in this list. We store them by
* stream ID and then by SSN.
...
...
@@ -609,12 +640,10 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
			break;
	}

-	/* If the queue is empty, we have a different function to call. */
-	if (skb_peek(&ulpq->lobby))
-		__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
-	else
-		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+	/* Insert before pos. */
+	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}

static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
...
...
@@ -705,7 +734,7 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
-				struct sctp_chunk *chunk, int priority)
+				struct sctp_chunk *chunk, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
...
...
@@ -729,7 +758,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
-		      int priority)
+		      int gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;
...
...
@@ -755,9 +784,9 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
-		sctp_ulpq_tail_data(ulpq, chunk, priority);
+		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-		sctp_ulpq_partial_delivery(ulpq, chunk, priority);
+		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	return;
...
...
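To put the hunk above in context: "reneging" means reclaiming receive-window space from events already buffered in the ordering and reassembly queues but not yet read, so that a newly arrived chunk can be accepted; once room exists, the chunk is queued and partial delivery may be started. A rough, purely illustrative sketch of that decision, with all names and the reclaim order being assumptions rather than the kernel's code:

#include <stdint.h>

/* Hypothetical stand-ins for helpers that reclaim bytes from the
 * ordered-event queue and from reassembly fragments; each returns how
 * many bytes it actually freed.
 */
static uint32_t example_free_ordered(uint32_t want) { return want / 2; }
static uint32_t example_free_frags(uint32_t want)   { return want; }

/* Return 0 if enough window space exists (or could be reclaimed) for a
 * chunk of chunk_len bytes, -1 if the chunk still has to be dropped.
 */
static int example_renege(uint32_t rwnd_left, uint32_t chunk_len)
{
	uint32_t needed, freed;

	if (chunk_len <= rwnd_left)
		return 0;

	needed = chunk_len - rwnd_left;
	freed = example_free_ordered(needed);
	if (freed < needed)
		freed += example_free_frags(needed - freed);

	return (freed >= needed) ? 0 : -1;
}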
@@ -768,7 +797,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
/* Notify the application if an association is aborted and in
* partial delivery mode. Send up any pending received messages.
*/
-void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int priority)
+void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
...
...
@@ -781,7 +810,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int priority)
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
-					      priority);
+					      gfp);
	if (ev)
		__skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
...
...