Commit 9850a96f authored Mar 24, 2003 by Jon Grimm
Merge touki.austin.ibm.com:/home/jgrimm/bk/linux-2.5.66
into touki.austin.ibm.com:/home/jgrimm/bk/lksctp-2.5.work
Parents: 42382f86, a33b4399
Showing 9 changed files with 1202 additions and 1151 deletions (+1202, -1151).
  include/net/sctp/constants.h   +3    -5
  include/net/sctp/sctp.h        +31   -22
  include/net/sctp/structs.h     +6    -3
  net/sctp/associola.c           +12   -25
  net/sctp/input.c               +110  -64
  net/sctp/ipv6.c                +63   -18
  net/sctp/output.c              +40   -2
  net/sctp/outqueue.c            +6    -7
  net/sctp/sm_sideeffect.c       +931  -1005
include/net/sctp/constants.h
...
@@ -138,12 +138,10 @@ typedef enum {
  */
 typedef union {
 	sctp_cid_t chunk;
 	sctp_event_timeout_t timeout;
 	sctp_event_other_t other;
 	sctp_event_primitive_t primitive;
 } sctp_subtype_t;
 
 #define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \
...
@@ -421,9 +419,9 @@ typedef enum {
 /* Reasons to retransmit. */
 typedef enum {
-	SCTP_RETRANSMIT_T3_RTX,
-	SCTP_RETRANSMIT_FAST_RTX,
-	SCTP_RETRANSMIT_PMTU_DISCOVERY,
+	SCTP_RTXR_T3_RTX,
+	SCTP_RTXR_FAST_RTX,
+	SCTP_RTXR_PMTUD,
 } sctp_retransmit_reason_t;
 
 /* Reasons to lower cwnd. */
...
include/net/sctp/sctp.h
...
@@ -130,7 +130,7 @@ extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
 
 /*
- * sctp_socket.c
+ * sctp/socket.c
  */
 extern int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 extern int sctp_inet_listen(struct socket *sock, int backlog);
...
@@ -139,7 +139,7 @@ extern unsigned int sctp_poll(struct file *file, struct socket *sock,
 		poll_table *wait);
 
 /*
- * sctp_primitive.c
+ * sctp/primitive.c
  */
 extern int sctp_primitive_ASSOCIATE(sctp_association_t *, void *arg);
 extern int sctp_primitive_SHUTDOWN(sctp_association_t *, void *arg);
...
@@ -148,14 +148,14 @@ extern int sctp_primitive_SEND(sctp_association_t *, void *arg);
 extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);
 
 /*
- * sctp_crc32c.c
+ * sctp/crc32c.c
  */
 extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
 extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
 extern __u32 sctp_end_cksum(__u32 cksum);
 
 /*
- * sctp_input.c
+ * sctp/input.c
  */
 extern int sctp_rcv(struct sk_buff *skb);
 extern void sctp_v4_err(struct sk_buff *skb, u32 info);
...
@@ -170,9 +170,16 @@ extern void __sctp_unhash_endpoint(sctp_endpoint_t *);
 extern sctp_association_t *__sctp_lookup_association(const union sctp_addr *,
						     const union sctp_addr *,
						     struct sctp_transport **);
+extern struct sock *sctp_err_lookup(int family, struct sk_buff *,
+				    struct sctphdr *, struct sctp_endpoint **,
+				    struct sctp_association **,
+				    struct sctp_transport **);
+extern void sctp_err_finish(struct sock *, struct sctp_endpoint *,
+			    struct sctp_association *);
+extern void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+				  struct sctp_transport *t, __u32 pmtu);
 
 /*
- * sctp_hashdriver.c
+ * sctp/hashdriver.c
  */
 extern void sctp_hash_digest(const char *secret, const int secret_len,
			     const char *text, const int text_len,
...
@@ -184,9 +191,7 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
 #ifdef TEST_FRAME
 #include <test_frame.h>
 #else
 
 /* spin lock wrappers. */
...
@@ -312,7 +317,6 @@ static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
 #endif
 
 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
...
@@ -320,19 +324,15 @@ static inline void sctp_sysctl_unregister(void) { return; }
 extern int sctp_v6_init(void);
 extern void sctp_v6_exit(void);
-
-static inline int sctp_ipv6_addr_type(const struct in6_addr *addr)
-{
-	return ipv6_addr_type((struct in6_addr *)addr);
-}
-
-#else /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
-
-#define sctp_ipv6_addr_type(a) 0
+extern void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+			int type, int code, int offset, __u32 info);
 
+#else /* #ifdef defined(CONFIG_IPV6) */
 static inline int sctp_v6_init(void) { return 0; }
 static inline void sctp_v6_exit(void) { return; }
-#endif /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* #if defined(CONFIG_IPV6) */
 
 /* Map an association to an assoc_id. */
 static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)
...
@@ -421,6 +421,15 @@ static inline __s32 sctp_jitter(__u32 rto)
 	return ret;
 }
 
+/* Break down data chunks at this point. */
+static inline int sctp_frag_point(int pmtu)
+{
+	pmtu -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+	pmtu -= sizeof(struct sctp_sack_chunk);
+
+	return pmtu;
+}
+
 /* Walk through a list of TLV parameters.  Don't trust the
  * individual parameter lengths and instead depend on
  * the chunk length to indicate when to stop.  Make sure
...
@@ -537,7 +546,7 @@ struct sctp_sock {
 	struct sock sk;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	struct ipv6_pinfo *pinet6;
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
 	struct inet_opt inet;
 	struct sctp_opt sctp;
 };
...
@@ -550,7 +559,7 @@ struct sctp6_sock {
 	struct sctp_opt sctp;
 	struct ipv6_pinfo inet6;
 };
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
 
 #define sctp_sk(__sk) (&((struct sctp_sock *)__sk)->sctp)
...
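To make the new helper's arithmetic concrete, here is a hedged, standalone sketch. The constant values are illustrative assumptions only; the real numbers come from SCTP_IP_OVERHEAD and the chunk-header struct sizes in this tree.

/* frag_point_demo.c -- standalone illustration of sctp_frag_point().
 * Assumed sizes (illustrative only): 32 bytes of "IP overhead" standing
 * in for SCTP_IP_OVERHEAD, a 16-byte DATA chunk header, and a 16-byte
 * minimal SACK chunk.
 */
#include <stdio.h>

#define IP_OVERHEAD   32  /* stands in for SCTP_IP_OVERHEAD */
#define DATA_CHUNK_SZ 16  /* stands in for sizeof(struct sctp_data_chunk) */
#define SACK_CHUNK_SZ 16  /* stands in for sizeof(struct sctp_sack_chunk) */

static int frag_point(int pmtu)
{
	pmtu -= IP_OVERHEAD + DATA_CHUNK_SZ; /* headers on every DATA packet */
	pmtu -= SACK_CHUNK_SZ;               /* reserve room to bundle a SACK */
	return pmtu;
}

int main(void)
{
	/* With these assumed sizes, a 1500-byte path leaves 1436 bytes. */
	printf("pmtu 1500 -> frag point %d\n", frag_point(1500));
	return 0;
}

Reserving SACK room here dovetails with the new SACK bundling in net/sctp/output.c below: a DATA packet sized by sctp_frag_point() always has space left for one bundled SACK.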
include/net/sctp/structs.h
...
@@ -590,13 +590,16 @@ struct sctp_packet {
 	/* This packet should advertise ECN capability to the network
 	 * via the ECT bit.
 	 */
-	int ecn_capable;
+	char ecn_capable;
 
 	/* This packet contains a COOKIE-ECHO chunk. */
-	int has_cookie_echo;
+	char has_cookie_echo;
 
+	/* This packet contains a SACK chunk. */
+	char has_sack;
+
 	/* SCTP cannot fragment this packet. So let ip fragment it. */
-	int ipfragok;
+	char ipfragok;
 
 	int malloced;
 };
...
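A side note on the int-to-char switch: these fields are simple booleans, so a byte each suffices. A tiny standalone sketch of the space difference (compiler padding varies; the 16-versus-4 figure is an assumption for typical 32-bit targets of the era):

#include <stdio.h>

struct flags_int  { int  ecn_capable, has_cookie_echo, has_sack, ipfragok; };
struct flags_char { char ecn_capable, has_cookie_echo, has_sack, ipfragok; };

int main(void)
{
	/* Typically prints "16 vs 4". */
	printf("%zu vs %zu\n", sizeof(struct flags_int),
	       sizeof(struct flags_char));
	return 0;
}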
net/sctp/associola.c
...
@@ -421,8 +421,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
 			  "%d\n", asoc, asoc->pmtu);
 
-	asoc->frag_point = asoc->pmtu;
-	asoc->frag_point -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+	asoc->frag_point = sctp_frag_point(asoc->pmtu);
 
 	/* The asoc->peer.port might not be meaningful yet, but
 	 * initialize the packet structure anyway.
...
@@ -658,31 +657,20 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1,
 }
 
 /* Return an ecne chunk to get prepended to a packet.
- * Note: We are sly and return a shared, prealloced chunk.
+ * Note: We are sly and return a shared, prealloced chunk.  FIXME:
+ * No we don't, but we could/should.
  */
-sctp_chunk_t *sctp_get_ecne_prepend(sctp_association_t *asoc)
+sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc)
 {
-	sctp_chunk_t *chunk;
-	int need_ecne;
-	__u32 lowest_tsn;
-
-	/* Can be called from task or bh.  Both need_ecne and
-	 * last_ecne_tsn are written during bh.
-	 */
-	need_ecne = asoc->need_ecne;
-	lowest_tsn = asoc->last_ecne_tsn;
-
-	if (need_ecne) {
-		chunk = sctp_make_ecne(asoc, lowest_tsn);
-
-		/* ECNE is not mandatory to the flow.  Being unable to
-		 * alloc mem is not deadly.  We are just unable to help
-		 * out the network.  If we run out of memory, just return
-		 * NULL.
-		 */
-	} else {
-		chunk = NULL;
-	}
+	struct sctp_chunk *chunk;
 
+	/* Send ECNE if needed.
+	 * Not being able to allocate a chunk here is not deadly.
+	 */
+	if (asoc->need_ecne)
+		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
+	else
+		chunk = NULL;
 	return chunk;
 }
...
@@ -986,8 +974,7 @@ void sctp_assoc_sync_pmtu(sctp_association_t *asoc)
 	if (pmtu) {
 		asoc->pmtu = pmtu;
-		asoc->frag_point = pmtu - (SCTP_IP_OVERHEAD +
-					   sizeof(sctp_data_chunk_t));
+		asoc->frag_point = sctp_frag_point(pmtu);
 	}
 
 	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
...
net/sctp/input.c
...
@@ -207,21 +207,19 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);
 
-	if (sock_owned_by_user(sk)) {
+	if (sock_owned_by_user(sk))
 		sk_add_backlog(sk, (struct sk_buff *) chunk);
-	} else {
+	else
 		sctp_backlog_rcv(sk, (struct sk_buff *) chunk);
-	}
 
 	/* Release the sock and any reference counts we took in the
 	 * lookup calls.
 	 */
 	sctp_bh_unlock_sock(sk);
-	if (asoc) {
+	if (asoc)
 		sctp_association_put(asoc);
-	} else {
+	else
 		sctp_endpoint_put(ep);
-	}
 	sock_put(sk);
 	return ret;
...
@@ -268,10 +266,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Handle icmp frag needed error. */
-static inline void sctp_icmp_frag_needed(struct sock *sk,
-					 sctp_association_t *asoc,
-					 struct sctp_transport *t,
-					 __u32 pmtu)
+void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
+			   struct sctp_transport *transport, __u32 pmtu)
 {
 	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
 		printk(KERN_WARNING "%s: Reported pmtu %d too low, "
...
@@ -280,54 +276,38 @@ static inline void sctp_icmp_frag_needed(struct sock *sk,
 		pmtu = SCTP_DEFAULT_MINSEGMENT;
 	}
 
-	if (!sock_owned_by_user(sk) && t && (t->pmtu != pmtu)) {
-		t->pmtu = pmtu;
+	if (!sock_owned_by_user(sk) && transport && (transport->pmtu != pmtu)) {
+		transport->pmtu = pmtu;
 		sctp_assoc_sync_pmtu(asoc);
-		sctp_retransmit(&asoc->outqueue, t,
-				SCTP_RETRANSMIT_PMTU_DISCOVERY);
+		sctp_retransmit(&asoc->outqueue, transport, SCTP_RTXR_PMTUD);
 	}
 }
 
-/*
- * This routine is called by the ICMP module when it gets some
- * sort of error condition.  If err < 0 then the socket should
- * be closed and the error returned to the user.  If err > 0
- * it's just the icmp type << 8 | icmp code.  After adjustment
- * header points to the first 8 bytes of the sctp header.  We need
- * to find the appropriate port.
- *
- * The locking strategy used here is very "optimistic". When
- * someone else accesses the socket the ICMP is just dropped
- * and for some paths there is no check at all.
- * A more general error queue to queue errors for later handling
- * is probably better.
- *
- */
-void sctp_v4_err(struct sk_buff *skb, __u32 info)
+/* Common lookup code for icmp/icmpv6 error handler. */
+struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+			     struct sctphdr *sctphdr,
+			     struct sctp_endpoint **epp,
+			     struct sctp_association **app,
+			     struct sctp_transport **tpp)
 {
-	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
-	int type = skb->h.icmph->type;
-	int code = skb->h.icmph->code;
-	union sctp_addr saddr, daddr;
-	struct inet_opt *inet;
-	struct sock *sk = NULL;
-	sctp_endpoint_t *ep = NULL;
-	sctp_association_t *asoc = NULL;
-	struct sctp_transport *transport;
-	int err;
-
-	if (skb->len < ((iph->ihl << 2) + 8)) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
-	}
-
-	saddr.v4.sin_family = AF_INET;
-	saddr.v4.sin_port = ntohs(sh->source);
-	memcpy(&saddr.v4.sin_addr.s_addr, &iph->saddr, sizeof(struct in_addr));
-	daddr.v4.sin_family = AF_INET;
-	daddr.v4.sin_port = ntohs(sh->dest);
-	memcpy(&daddr.v4.sin_addr.s_addr, &iph->daddr, sizeof(struct in_addr));
+	union sctp_addr saddr;
+	union sctp_addr daddr;
+	struct sctp_af *af;
+	struct sock *sk = NULL;
+	struct sctp_endpoint *ep = NULL;
+	struct sctp_association *asoc = NULL;
+	struct sctp_transport *transport = NULL;
+
+	*app = NULL; *epp = NULL; *tpp = NULL;
+
+	af = sctp_get_af_specific(family);
+	if (unlikely(!af)) {
+		return NULL;
+	}
+
+	/* Initialize local addresses for lookups. */
+	af->from_skb(&saddr, skb, 1);
+	af->from_skb(&daddr, skb, 0);
 
 	/* Look for an association that matches the incoming ICMP error
 	 * packet.
...
@@ -340,13 +320,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	 */
 	ep = __sctp_rcv_lookup_endpoint(&daddr);
 	if (!ep) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
+		return NULL;
 	}
 	}
 
 	if (asoc) {
-		if (ntohl(sh->vtag) != asoc->c.peer_vtag) {
+		if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
 			ICMP_INC_STATS_BH(IcmpInErrors);
 			goto out;
 		}
...
@@ -355,12 +334,90 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 		sk = ep->base.sk;
 
 	sctp_bh_lock_sock(sk);
 
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
 		NET_INC_STATS_BH(LockDroppedIcmps);
 
+	*epp = ep;
+	*app = asoc;
+	*tpp = transport;
+	return sk;
+
+out:
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+	return NULL;
+}
+
+/* Common cleanup code for icmp/icmpv6 error handler. */
+void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep,
+		     struct sctp_association *asoc)
+{
+	sctp_bh_unlock_sock(sk);
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+}
+
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition.  If err < 0 then the socket should
+ * be closed and the error returned to the user.  If err > 0
+ * it's just the icmp type << 8 | icmp code.  After adjustment
+ * header points to the first 8 bytes of the sctp header.  We need
+ * to find the appropriate port.
+ *
+ * The locking strategy used here is very "optimistic". When
+ * someone else accesses the socket the ICMP is just dropped
+ * and for some paths there is no check at all.
+ * A more general error queue to queue errors for later handling
+ * is probably better.
+ *
+ */
+void sctp_v4_err(struct sk_buff *skb, __u32 info)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
+	int type = skb->h.icmph->type;
+	int code = skb->h.icmph->code;
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct inet_opt *inet;
+	char *saveip, *savesctp;
+	int err;
+
+	if (skb->len < ((iph->ihl << 2) + 8)) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.iph = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
+	/* Put back, the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+	/* Warning:  The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */
+
 	switch (type) {
 	case ICMP_PARAMETERPROB:
 		err = EPROTO;
...
@@ -399,13 +456,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	}
 
 out_unlock:
-	sctp_bh_unlock_sock(sk);
-out:
-	sock_put(sk);
-	if (asoc)
-		sctp_association_put(asoc);
-	if (ep)
-		sctp_endpoint_put(ep);
+	sctp_err_finish(sk, ep, asoc);
 }
 
 /*
...
@@ -782,8 +833,3 @@ sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
 	return asoc;
 }
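The net effect is a contract shared by both address families: sctp_err_lookup() returns with the socket bottom-half lock held and references taken, and every successful lookup must be balanced by sctp_err_finish(). A condensed sketch of a caller, distilled from sctp_v4_err() above (error handling elided):

	sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
	if (!sk) {
		ICMP_INC_STATS_BH(IcmpInErrors);
		return;                 /* no lock held, nothing to release */
	}
	/* ... family-specific ICMP processing; sock lock is held ... */
	sctp_err_finish(sk, ep, asoc);  /* unlock + drop ep/asoc references */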
net/sctp/ipv6.c
 /* SCTP kernel reference Implementation
  * Copyright (c) 2001 Nokia, Inc.
  * Copyright (c) 2001 La Monte H.P. Yarroll
- * Copyright (c) 2002 International Business Machines, Corp.
+ * Copyright (c) 2002-2003 International Business Machines, Corp.
  *
  * This file is part of the SCTP kernel reference Implementation
  *
...
@@ -88,17 +88,62 @@ extern struct notifier_block sctp_inetaddr_notifier;
 	ntohs((addr)->s6_addr16[6]), \
 	ntohs((addr)->s6_addr16[7])
 
-/* FIXME: Comments. */
-static inline void sctp_v6_err(struct sk_buff *skb,
-			       struct inet6_skb_parm *opt,
-			       int type, int code, int offset, __u32 info)
+/* ICMP error handler. */
+void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+		 int type, int code, int offset, __u32 info)
 {
-	/* BUG.  WRITE ME. */
+	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct ipv6_pinfo *np;
+	char *saveip, *savesctp;
+	int err;
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.ipv6h = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET6, skb, sh, &ep, &asoc, &transport);
+	/* Put back, the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP6_INC_STATS_BH(Icmp6InErrors);
+		return;
+	}
+
+	/* Warning:  The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */
+
+	switch (type) {
+	case ICMPV6_PKT_TOOBIG:
+		sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
+		goto out_unlock;
+	default:
+		break;
+	}
+
+	np = inet6_sk(sk);
+	icmpv6_err_convert(type, code, &err);
+	if (!sock_owned_by_user(sk) && np->recverr) {
+		sk->err = err;
+		sk->error_report(sk);
+	} else {	/* Only an error on timeout */
+		sk->err_soft = err;
+	}
+
+out_unlock:
+	sctp_err_finish(sk, ep, asoc);
 }
 
 /* Based on tcp_v6_xmit() in tcp_ipv6.c. */
-static inline int sctp_v6_xmit(struct sk_buff *skb,
-			       struct sctp_transport *transport,
-			       int ipfragok)
+static int sctp_v6_xmit(struct sk_buff *skb,
+			struct sctp_transport *transport,
+			int ipfragok)
 {
 	struct sock *sk = skb->sk;
 	struct ipv6_pinfo *np = inet6_sk(sk);
...
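Both handlers now share the same save/repoint/restore idiom so that the generic lookup sees the headers embedded in the ICMP payload rather than the outer packet's; a condensed sketch (fields as in 2.5-era struct sk_buff, taken from the hunks above):

	char *saveip = skb->nh.raw, *savesctp = skb->h.raw;
	skb->nh.ipv6h = iph;        /* point at the embedded net header  */
	skb->h.raw = (char *)sh;    /* ...and the embedded SCTP header   */
	sk = sctp_err_lookup(AF_INET6, skb, sh, &ep, &asoc, &transport);
	skb->nh.raw = saveip;       /* restore before anyone else sees the skb */
	skb->h.raw = savesctp;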
net/sctp/output.c
...
@@ -79,6 +79,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
 	packet->ecn_capable = ecn_capable;
 	packet->get_prepend_chunk = prepend_handler;
 	packet->has_cookie_echo = 0;
+	packet->has_sack = 0;
 	packet->ipfragok = 0;
 
 	/* We might need to call the prepend_handler right away. */
...
@@ -100,6 +101,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
 	packet->ecn_capable = 0;
 	packet->get_prepend_chunk = NULL;
 	packet->has_cookie_echo = 0;
+	packet->has_sack = 0;
 	packet->ipfragok = 0;
 	packet->malloced = 0;
 	sctp_packet_reset(packet);
...
@@ -155,6 +157,37 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
 	return retval;
 }
 
+/* Try to bundle a SACK with the packet. */
+static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
+					   struct sctp_chunk *chunk)
+{
+	sctp_xmit_t retval = SCTP_XMIT_OK;
+
+	/* If sending DATA and haven't already bundled a SACK, try to
+	 * bundle one into the packet.
+	 */
+	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
+	    !pkt->has_cookie_echo) {
+		struct sctp_association *asoc;
+		asoc = pkt->transport->asoc;
+
+		if (asoc->a_rwnd > asoc->rwnd) {
+			struct sctp_chunk *sack;
+			asoc->a_rwnd = asoc->rwnd;
+			sack = sctp_make_sack(asoc);
+			if (sack) {
+				struct timer_list *timer;
+				retval = sctp_packet_append_chunk(pkt, sack);
+				asoc->peer.sack_needed = 0;
+				timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+				if (timer_pending(timer) && del_timer(timer))
+					sctp_association_put(asoc);
+			}
+		}
+	}
+	return retval;
+}
+
 /* Append a chunk to the offered packet reporting back any inability to do
  * so.
  */
...
@@ -167,6 +200,10 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 	size_t pmtu;
 	int too_big;
 
+	retval = sctp_packet_bundle_sack(packet, chunk);
+	if (retval != SCTP_XMIT_OK)
+		goto finish;
+
 	pmtu = ((packet->transport->asoc) ?
 		(packet->transport->asoc->pmtu) :
 		(packet->transport->pmtu));
...
@@ -216,9 +253,10 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 		retval = sctp_packet_append_data(packet, chunk);
 		if (SCTP_XMIT_OK != retval)
 			goto finish;
-	} else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) {
+	} else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type)
 		packet->has_cookie_echo = 1;
-	}
+	else if (SCTP_CID_SACK == chunk->chunk_hdr->type)
+		packet->has_sack = 1;
 
 	/* It is OK to send this chunk. */
 	__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
...
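The bundling condition deserves a gloss: a_rwnd is the receive window the association last advertised to its peer, rwnd the current one, so a_rwnd > rwnd means the peer's picture is stale, and a SACK piggybacked on outgoing DATA refreshes it for free while letting the delayed-SACK timer be cancelled. A standalone sketch of just the predicate, under those assumed semantics:

#include <stdio.h>

/* is_data: chunk is DATA; has_sack/has_cookie_echo: packet flags. */
static int should_bundle_sack(int is_data, int has_sack, int has_cookie_echo,
			      unsigned a_rwnd, unsigned rwnd)
{
	return is_data && !has_sack && !has_cookie_echo && (a_rwnd > rwnd);
}

int main(void)
{
	printf("%d\n", should_bundle_sack(1, 0, 0, 8000, 4096)); /* 1: bundle   */
	printf("%d\n", should_bundle_sack(1, 1, 0, 8000, 4096)); /* 0: one SACK */
	return 0;
}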
net/sctp/outqueue.c
...
@@ -357,7 +357,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	__u8 fast_retransmit = 0;
 
 	switch (reason) {
-	case SCTP_RETRANSMIT_T3_RTX:
+	case SCTP_RTXR_T3_RTX:
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
 		/* Update the retran path if the T3-rtx timer has expired for
 		 * the current retran path.
...
@@ -365,10 +365,11 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 		if (transport == transport->asoc->peer.retran_path)
 			sctp_assoc_update_retran_path(transport->asoc);
 		break;
-	case SCTP_RETRANSMIT_FAST_RTX:
+	case SCTP_RTXR_FAST_RTX:
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
 		fast_retransmit = 1;
 		break;
+	case SCTP_RTXR_PMTUD:
 	default:
 		break;
 	}
...
@@ -876,7 +877,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 		start_timer = 0;
 		queue = &q->out;
 
-		while (NULL != (chunk = sctp_outq_dequeue_data(q))) {
+		while ((chunk = sctp_outq_dequeue_data(q))) {
 			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
 			 * stream identifier.
 			 */
...
@@ -891,9 +892,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 				if (ev)
 					sctp_ulpq_tail_event(&asoc->ulpq, ev);
 
-				/* Free the chunk.  This chunk is not on any
-				 * list yet, just free it.
-				 */
+				/* Free the chunk. */
 				sctp_free_chunk(chunk);
 				continue;
 			}
...
@@ -1572,7 +1571,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 	if (transport) {
 		if (do_fast_retransmit)
-			sctp_retransmit(q, transport, SCTP_RETRANSMIT_FAST_RTX);
+			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
 
 		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
 				  "ssthresh: %d, flight_size: %d, pba: %d\n",
...
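One behavioral point worth calling out: the new SCTP_RTXR_PMTUD case shares the default arm, so a PMTU-triggered retransmission does not lower the congestion window; only the timer and fast-retransmit reasons do, presumably because a PMTU change is not a congestion signal. A condensed view of the dispatch after the rename (drawn from the hunks above; the retran-path update is elided):

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		break;
	case SCTP_RTXR_FAST_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		fast_retransmit = 1;
		break;
	case SCTP_RTXR_PMTUD:
	default:
		break;	/* leave cwnd alone */
	}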
net/sctp/sm_sideeffect.c
View file @
9850a96f
...
@@ -55,1202 +55,1128 @@
...
@@ -55,1202 +55,1128 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/sm.h>
/* Do forward declarations of static functions. */
/********************************************************************
static
void
sctp_do_ecn_ce_work
(
sctp_association_t
*
,
__u32
lowest_tsn
);
* Helper functions
static
sctp_chunk_t
*
sctp_do_ecn_ecne_work
(
sctp_association_t
*
asoc
,
********************************************************************/
__u32
lowest_tsn
,
sctp_chunk_t
*
);
static
void
sctp_do_ecn_cwr_work
(
sctp_association_t
*
,
__u32
lowest_tsn
);
static
void
sctp_do_8_2_transport_strike
(
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_init_failed
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_assoc_failed
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_event_t
,
sctp_subtype_t
,
sctp_chunk_t
*
chunk
);
static
int
sctp_cmd_process_init
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_chunk_t
*
chunk
,
sctp_init_chunk_t
*
peer_init
,
int
priority
);
static
void
sctp_cmd_hb_timers_start
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_hb_timers_stop
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_hb_timer_update
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_transport_reset
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_transport_on
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
,
sctp_chunk_t
*
);
static
int
sctp_cmd_process_sack
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_sackhdr_t
*
);
static
void
sctp_cmd_setup_t2
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_chunk_t
*
);
static
void
sctp_cmd_new_state
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_state_t
);
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
*/
#define DEBUG_PRE \
SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
"ep %p, %s, %s, asoc %p[%s], %s\n", \
ep, sctp_evttype_tbl[event_type], \
(*debug_fn)(subtype), asoc, \
sctp_state_tbl[state], state_fn->name)
#define DEBUG_POST \
/* A helper function for delayed processing of INET ECN CE bit. */
SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
static
void
sctp_do_ecn_ce_work
(
sctp_association_t
*
asoc
,
__u32
lowest_tsn
)
"asoc %p, status: %s\n", \
{
asoc, sctp_status_tbl[status])
/* Save the TSN away for comparison when we receive CWR */
#define DEBUG_POST_SFX \
asoc
->
last_ecne_tsn
=
lowest_tsn
;
SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
asoc
->
need_ecne
=
1
;
error, asoc, \
}
sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
/*
/*
Helper function for delayed processing of SCTP ECNE chunk. */
* This is the master state machine processing function.
/* RFC 2960 Appendix A
*
*
* If you want to understand all of lksctp, this is a
* RFC 2481 details a specific bit for a sender to send in
* good place to start.
* the header of its next outbound TCP segment to indicate to
* its peer that it has reduced its congestion window. This
* is termed the CWR bit. For SCTP the same indication is made
* by including the CWR chunk. This chunk contains one data
* element, i.e. the TSN number that was sent in the ECNE chunk.
* This element represents the lowest TSN number in the datagram
* that was originally marked with the CE bit.
*/
*/
int
sctp_do_sm
(
sctp_event_t
event_type
,
sctp_subtype_t
subtype
,
static
sctp_chunk_t
*
sctp_do_ecn_ecne_work
(
sctp_association_t
*
asoc
,
sctp_state_t
state
,
__u32
lowest_tsn
,
sctp_endpoint_t
*
ep
,
sctp_chunk_t
*
chunk
)
sctp_association_t
*
asoc
,
void
*
event_arg
,
int
priority
)
{
{
sctp_cmd_seq_t
commands
;
sctp_chunk_t
*
repl
;
sctp_sm_table_entry_t
*
state_fn
;
sctp_disposition_t
status
;
int
error
=
0
;
typedef
const
char
*
(
printfn_t
)(
sctp_subtype_t
);
static
printfn_t
*
table
[]
=
{
/* Our previously transmitted packet ran into some congestion
NULL
,
sctp_cname
,
sctp_tname
,
sctp_oname
,
sctp_pname
,
* so we should take action by reducing cwnd and ssthresh
};
* and then ACK our peer that we we've done so by
printfn_t
*
debug_fn
__attribute__
((
unused
))
=
table
[
event_type
];
* sending a CWR.
*/
/* Look up the state function, run it, and then process the
/* First, try to determine if we want to actually lower
* side effects. These three steps are the heart of lksctp.
* our cwnd variables. Only lower them if the ECNE looks more
* recent than the last response.
*/
*/
state_fn
=
sctp_sm_lookup_event
(
event_type
,
state
,
subtype
);
if
(
TSN_lt
(
asoc
->
last_cwr_tsn
,
lowest_tsn
))
{
struct
sctp_transport
*
transport
;
sctp_init_cmd_seq
(
&
commands
);
/* Find which transport's congestion variables
* need to be adjusted.
*/
transport
=
sctp_assoc_lookup_tsn
(
asoc
,
lowest_tsn
);
DEBUG_PRE
;
/* Update the congestion variables. */
status
=
(
*
state_fn
->
fn
)(
ep
,
asoc
,
subtype
,
event_arg
,
&
commands
);
if
(
transport
)
DEBUG_POST
;
sctp_transport_lower_cwnd
(
transport
,
SCTP_LOWER_CWND_ECNE
);
asoc
->
last_cwr_tsn
=
lowest_tsn
;
}
error
=
sctp_side_effects
(
event_type
,
subtype
,
state
,
/* Always try to quiet the other end. In case of lost CWR,
ep
,
asoc
,
event_arg
,
* resend last_cwr_tsn.
status
,
&
commands
,
*/
priority
);
repl
=
sctp_make_cwr
(
asoc
,
asoc
->
last_cwr_tsn
,
chunk
);
DEBUG_POST_SFX
;
return
error
;
/* If we run out of memory, it will look like a lost CWR. We'll
* get back in sync eventually.
*/
return
repl
;
}
}
#undef DEBUG_PRE
/* Helper function to do delayed processing of ECN CWR chunk. */
#undef DEBUG_POST
static
void
sctp_do_ecn_cwr_work
(
sctp_association_t
*
asoc
,
__u32
lowest_tsn
)
/*****************************************************************
* This the master state function side effect processing function.
*****************************************************************/
int
sctp_side_effects
(
sctp_event_t
event_type
,
sctp_subtype_t
subtype
,
sctp_state_t
state
,
sctp_endpoint_t
*
ep
,
sctp_association_t
*
asoc
,
void
*
event_arg
,
sctp_disposition_t
status
,
sctp_cmd_seq_t
*
commands
,
int
priority
)
{
{
int
error
;
/* Turn off ECNE getting auto-prepended to every outgoing
* packet
/* FIXME - Most of the dispositions left today would be categorized
* as "exceptional" dispositions. For those dispositions, it
* may not be proper to run through any of the commands at all.
* For example, the command interpreter might be run only with
* disposition SCTP_DISPOSITION_CONSUME.
*/
*/
if
(
0
!=
(
error
=
sctp_cmd_interpreter
(
event_type
,
subtype
,
state
,
asoc
->
need_ecne
=
0
;
ep
,
asoc
,
}
event_arg
,
status
,
commands
,
priority
)))
goto
bail
;
switch
(
status
)
{
/* Generate SACK if necessary. We call this at the end of a packet. */
case
SCTP_DISPOSITION_DISCARD
:
int
sctp_gen_sack
(
struct
sctp_association
*
asoc
,
int
force
,
SCTP_DEBUG_PRINTK
(
"Ignored sctp protocol event - state %d, "
sctp_cmd_seq_t
*
commands
)
"event_type %d, event_id %d
\n
"
,
{
state
,
event_type
,
subtype
.
chunk
);
__u32
ctsn
,
max_tsn_seen
;
break
;
struct
sctp_chunk
*
sack
;
int
error
=
0
;
case
SCTP_DISPOSITION_NOMEM
:
if
(
force
)
/* We ran out of memory, so we need to discard this
asoc
->
peer
.
sack_needed
=
1
;
* packet.
*/
/* BUG--we should now recover some memory, probably by
* reneging...
*/
error
=
-
ENOMEM
;
break
;
case
SCTP_DISPOSITION_DELETE_TCB
:
ctsn
=
sctp_tsnmap_get_ctsn
(
&
asoc
->
peer
.
tsn_map
);
/* This should now be a command. */
max_tsn_seen
=
sctp_tsnmap_get_max_tsn_seen
(
&
asoc
->
peer
.
tsn_map
);
break
;
case
SCTP_DISPOSITION_CONSUME
:
/* From 12.2 Parameters necessary per association (i.e. the TCB):
case
SCTP_DISPOSITION_ABORT
:
*
/*
* Ack State : This flag indicates if the next received packet
* We should no longer have much work to do here as the
* : is to be responded to with a SACK. ...
* real work has been done as explicit commands above.
* : When DATA chunks are out of order, SACK's
* : are not delayed (see Section 6).
*
* [This is actually not mentioned in Section 6, but we
* implement it here anyway. --piggy]
*/
*/
break
;
if
(
max_tsn_seen
!=
ctsn
)
asoc
->
peer
.
sack_needed
=
1
;
case
SCTP_DISPOSITION_VIOLATION
:
printk
(
KERN_ERR
"sctp protocol violation state %d "
"chunkid %d
\n
"
,
state
,
subtype
.
chunk
);
break
;
case
SCTP_DISPOSITION_NOT_IMPL
:
/* From 6.2 Acknowledgement on Reception of DATA Chunks:
printk
(
KERN_WARNING
"sctp unimplemented feature in state %d, "
*
"event_type %d, event_id %d
\n
"
,
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
state
,
event_type
,
subtype
.
chunk
);
* an acknowledgement SHOULD be generated for at least every
break
;
* second packet (not every second DATA chunk) received, and
* SHOULD be generated within 200 ms of the arrival of any
* unacknowledged DATA chunk. ...
*/
if
(
!
asoc
->
peer
.
sack_needed
)
{
/* We will need a SACK for the next packet. */
asoc
->
peer
.
sack_needed
=
1
;
goto
out
;
}
else
{
if
(
asoc
->
a_rwnd
>
asoc
->
rwnd
)
asoc
->
a_rwnd
=
asoc
->
rwnd
;
sack
=
sctp_make_sack
(
asoc
);
if
(
!
sack
)
goto
nomem
;
case
SCTP_DISPOSITION_BUG
:
asoc
->
peer
.
sack_needed
=
0
;
printk
(
KERN_ERR
"sctp bug in state %d, "
"event_type %d, event_id %d
\n
"
,
state
,
event_type
,
subtype
.
chunk
);
BUG
();
break
;
default:
error
=
sctp_outq_tail
(
&
asoc
->
outqueue
,
sack
);
printk
(
KERN_ERR
"sctp impossible disposition %d "
"in state %d, event_type %d, event_id %d
\n
"
,
status
,
state
,
event_type
,
subtype
.
chunk
);
BUG
();
break
;
};
bail:
/* Stop the SACK timer. */
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_TIMER_STOP
,
SCTP_TO
(
SCTP_EVENT_TIMEOUT_SACK
));
}
out:
return
error
;
nomem:
error
=
-
ENOMEM
;
return
error
;
return
error
;
}
}
/********************************************************************
/* When the T3-RTX timer expires, it calls this function to create the
* 2nd Level Abstractions
* relevant state machine event.
********************************************************************/
*/
void
sctp_generate_t3_rtx_event
(
unsigned
long
peer
)
/* This is the side-effect interpreter. */
int
sctp_cmd_interpreter
(
sctp_event_t
event_type
,
sctp_subtype_t
subtype
,
sctp_state_t
state
,
sctp_endpoint_t
*
ep
,
sctp_association_t
*
asoc
,
void
*
event_arg
,
sctp_disposition_t
status
,
sctp_cmd_seq_t
*
commands
,
int
priority
)
{
{
int
error
=
0
;
int
error
;
int
force
;
struct
sctp_transport
*
transport
=
(
struct
sctp_transport
*
)
peer
;
sctp_cmd_t
*
cmd
;
sctp_association_t
*
asoc
=
transport
->
asoc
;
sctp_chunk_t
*
new_obj
;
sctp_chunk_t
*
chunk
=
NULL
;
struct
sctp_packet
*
packet
;
struct
list_head
*
pos
;
struct
timer_list
*
timer
;
unsigned
long
timeout
;
struct
sctp_transport
*
t
;
sctp_sackhdr_t
sackh
;
if
(
SCTP_EVENT_T_TIMEOUT
!=
event_type
)
chunk
=
(
sctp_chunk_t
*
)
event_arg
;
/* Note: This whole file is a huge candidate for rework.
/* Check whether a task is in the sock. */
* For example, each command could either have its own handler, so
* the loop would look like:
* while (cmds)
* cmd->handle(x, y, z)
* --jgrimm
*/
while
(
NULL
!=
(
cmd
=
sctp_next_cmd
(
commands
)))
{
switch
(
cmd
->
verb
)
{
case
SCTP_CMD_NOP
:
/* Do nothing. */
break
;
case
SCTP_CMD_NEW_ASOC
:
sctp_bh_lock_sock
(
asoc
->
base
.
sk
);
/* Register a new association. */
if
(
sock_owned_by_user
(
asoc
->
base
.
sk
))
{
asoc
=
cmd
->
obj
.
ptr
;
SCTP_DEBUG_PRINTK
(
"%s:Sock is busy.
\n
"
,
__FUNCTION__
);
/* Register with the endpoint. */
sctp_endpoint_add_asoc
(
ep
,
asoc
);
sctp_hash_established
(
asoc
);
break
;
case
SCTP_CMD_UPDATE_ASSOC
:
/* Try again later. */
sctp_assoc_update
(
asoc
,
cmd
->
obj
.
ptr
);
if
(
!
mod_timer
(
&
transport
->
T3_rtx_timer
,
jiffies
+
(
HZ
/
20
)))
break
;
sctp_transport_hold
(
transport
);
goto
out_unlock
;
}
case
SCTP_CMD_PURGE_OUTQUEUE
:
/* Is this transport really dead and just waiting around for
sctp_outq_teardown
(
&
asoc
->
outqueue
);
* the timer to let go of the reference?
break
;
*/
if
(
transport
->
dead
)
goto
out_unlock
;
case
SCTP_CMD_DELETE_TCB
:
/* Run through the state machine. */
/* Delete the current association. */
error
=
sctp_do_sm
(
SCTP_EVENT_T_TIMEOUT
,
sctp_unhash_established
(
asoc
);
SCTP_ST_TIMEOUT
(
SCTP_EVENT_TIMEOUT_T3_RTX
),
sctp_association_free
(
asoc
);
asoc
->
state
,
asoc
=
NULL
;
asoc
->
ep
,
asoc
,
break
;
transport
,
GFP_ATOMIC
)
;
case
SCTP_CMD_NEW_STATE
:
if
(
error
)
/* Enter a new state. */
asoc
->
base
.
sk
->
err
=
-
error
;
sctp_cmd_new_state
(
commands
,
asoc
,
cmd
->
obj
.
state
);
break
;
case
SCTP_CMD_REPORT_TSN
:
out_unlock
:
/* Record the arrival of a TSN. */
sctp_bh_unlock_sock
(
asoc
->
base
.
sk
);
sctp_tsnmap_mark
(
&
asoc
->
peer
.
tsn_map
,
cmd
->
obj
.
u32
);
sctp_transport_put
(
transport
);
break
;
}
case
SCTP_CMD_GEN_SACK
:
/* This is a sa interface for producing timeout events. It works
/* Generate a Selective ACK.
* for timeouts which use the association as their parameter.
* The argument tells us whether to just count
* the packet and MAYBE generate a SACK, or
* force a SACK out.
*/
*/
force
=
cmd
->
obj
.
i32
;
static
void
sctp_generate_timeout_event
(
sctp_association_t
*
asoc
,
error
=
sctp_gen_sack
(
asoc
,
force
,
commands
);
sctp_event_timeout_t
timeout_type
)
break
;
{
int
error
=
0
;
case
SCTP_CMD_PROCESS_SACK
:
/* Process an inbound SACK. */
error
=
sctp_cmd_process_sack
(
commands
,
asoc
,
cmd
->
obj
.
ptr
);
break
;
case
SCTP_CMD_GEN_INIT_ACK
:
sctp_bh_lock_sock
(
asoc
->
base
.
sk
);
/* Generate an INIT ACK chunk. */
if
(
sock_owned_by_user
(
asoc
->
base
.
sk
))
{
new_obj
=
sctp_make_init_ack
(
asoc
,
chunk
,
GFP_ATOMIC
,
SCTP_DEBUG_PRINTK
(
"%s:Sock is busy: timer %d
\n
"
,
0
);
__FUNCTION__
,
if
(
!
new_obj
)
timeout_type
);
goto
nomem
;
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
/* Try again later. */
SCTP_CHUNK
(
new_obj
));
if
(
!
mod_timer
(
&
asoc
->
timers
[
timeout_type
],
jiffies
+
(
HZ
/
20
)))
break
;
sctp_association_hold
(
asoc
);
goto
out_unlock
;
}
case
SCTP_CMD_PEER_INIT
:
/* Is this association really dead and just waiting around for
/* Process a unified INIT from the peer.
* the timer to let go of the reference?
* Note: Only used during INIT-ACK processing. If
* there is an error just return to the outter
* layer which will bail.
*/
*/
error
=
sctp_cmd_process_init
(
commands
,
asoc
,
chunk
,
if
(
asoc
->
base
.
dead
)
cmd
->
obj
.
ptr
,
priority
);
goto
out_unlock
;
break
;
case
SCTP_CMD_GEN_COOKIE_ECHO
:
/* Run through the state machine. */
/* Generate a COOKIE ECHO chunk. */
error
=
sctp_do_sm
(
SCTP_EVENT_T_TIMEOUT
,
new_obj
=
sctp_make_cookie_echo
(
asoc
,
chunk
);
SCTP_ST_TIMEOUT
(
timeout_type
),
if
(
!
new_obj
)
{
asoc
->
state
,
asoc
->
ep
,
asoc
,
if
(
cmd
->
obj
.
ptr
)
(
void
*
)
timeout_type
,
sctp_free_chunk
(
cmd
->
obj
.
ptr
);
GFP_ATOMIC
);
goto
nomem
;
}
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
SCTP_CHUNK
(
new_obj
));
/* If there is an ERROR chunk to be sent along with
if
(
error
)
* the COOKIE_ECHO, send it, too.
asoc
->
base
.
sk
->
err
=
-
error
;
*/
if
(
cmd
->
obj
.
ptr
)
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
SCTP_CHUNK
(
cmd
->
obj
.
ptr
));
break
;
case
SCTP_CMD_GEN_SHUTDOWN
:
out_unlock:
/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
sctp_bh_unlock_sock
(
asoc
->
base
.
sk
);
* Reset error counts.
sctp_association_put
(
asoc
);
*/
}
asoc
->
overall_error_count
=
0
;
/* Generate a SHUTDOWN chunk. */
void
sctp_generate_t1_cookie_event
(
unsigned
long
data
)
new_obj
=
sctp_make_shutdown
(
asoc
);
{
if
(
!
new_obj
)
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
goto
nomem
;
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_T1_COOKIE
);
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
}
SCTP_CHUNK
(
new_obj
));
break
;
case
SCTP_CMD_CHUNK_ULP
:
void
sctp_generate_t1_init_event
(
unsigned
long
data
)
/* Send a chunk to the sockets layer. */
{
SCTP_DEBUG_PRINTK
(
"sm_sideff: %s %p, %s %p.
\n
"
,
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
"chunk_up:"
,
cmd
->
obj
.
ptr
,
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_T1_INIT
);
"ulpq:"
,
&
asoc
->
ulpq
);
}
sctp_ulpq_tail_data
(
&
asoc
->
ulpq
,
cmd
->
obj
.
ptr
,
GFP_ATOMIC
);
break
;
case
SCTP_CMD_EVENT_ULP
:
void
sctp_generate_t2_shutdown_event
(
unsigned
long
data
)
/* Send a notification to the sockets layer. */
{
SCTP_DEBUG_PRINTK
(
"sm_sideff: %s %p, %s %p.
\n
"
,
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
"event_up:"
,
cmd
->
obj
.
ptr
,
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_T2_SHUTDOWN
);
"ulpq:"
,
&
asoc
->
ulpq
);
}
sctp_ulpq_tail_event
(
&
asoc
->
ulpq
,
cmd
->
obj
.
ptr
);
break
;
case
SCTP_CMD_REPLY
:
void
sctp_generate_t5_shutdown_guard_event
(
unsigned
long
data
)
/* Send a chunk to our peer. */
{
error
=
sctp_outq_tail
(
&
asoc
->
outqueue
,
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
cmd
->
obj
.
ptr
);
sctp_generate_timeout_event
(
asoc
,
break
;
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD
)
;
case
SCTP_CMD_SEND_PKT
:
}
/* sctp_generate_t5_shutdown_guard_event() */
/* Send a full packet to our peer. */
packet
=
cmd
->
obj
.
ptr
;
sctp_packet_transmit
(
packet
);
sctp_ootb_pkt_free
(
packet
);
break
;
case
SCTP_CMD_RETRAN
:
void
sctp_generate_autoclose_event
(
unsigned
long
data
)
/* Mark a transport for retransmission. */
{
sctp_retransmit
(
&
asoc
->
outqueue
,
cmd
->
obj
.
transport
,
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
SCTP_RETRANSMIT_T3_RTX
);
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_AUTOCLOSE
);
break
;
}
case
SCTP_CMD_TRANSMIT
:
/* Generate a heart beat event. If the sock is busy, reschedule. Make
/* Kick start transmission. */
* sure that the transport is still valid.
error
=
sctp_outq_flush
(
&
asoc
->
outqueue
,
0
);
*/
break
;
void
sctp_generate_heartbeat_event
(
unsigned
long
data
)
{
int
error
=
0
;
struct
sctp_transport
*
transport
=
(
struct
sctp_transport
*
)
data
;
sctp_association_t
*
asoc
=
transport
->
asoc
;
case
SCTP_CMD_ECN_CE
:
sctp_bh_lock_sock
(
asoc
->
base
.
sk
);
/* Do delayed CE processing. */
if
(
sock_owned_by_user
(
asoc
->
base
.
sk
))
{
sctp_do_ecn_ce_work
(
asoc
,
cmd
->
obj
.
u32
);
SCTP_DEBUG_PRINTK
(
"%s:Sock is busy.
\n
"
,
__FUNCTION__
);
break
;
case
SCTP_CMD_ECN_ECNE
:
/* Try again later. */
/* Do delayed ECNE processing. */
if
(
!
mod_timer
(
&
transport
->
hb_timer
,
jiffies
+
(
HZ
/
20
)))
new_obj
=
sctp_do_ecn_ecne_work
(
asoc
,
cmd
->
obj
.
u32
,
sctp_transport_hold
(
transport
);
chunk
);
goto
out_unlock
;
if
(
new_obj
)
}
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
SCTP_CHUNK
(
new_obj
));
break
;
case
SCTP_CMD_ECN_CWR
:
/* Is this structure just waiting around for us to actually
/* Do delayed CWR processing. */
* get destroyed?
sctp_do_ecn_cwr_work
(
asoc
,
cmd
->
obj
.
u32
);
*/
break
;
if
(
transport
->
dead
)
goto
out_unlock
;
case
SCTP_CMD_SETUP_T2
:
error
=
sctp_do_sm
(
SCTP_EVENT_T_TIMEOUT
,
sctp_cmd_setup_t2
(
commands
,
asoc
,
cmd
->
obj
.
ptr
);
SCTP_ST_TIMEOUT
(
SCTP_EVENT_TIMEOUT_HEARTBEAT
),
break
;
asoc
->
state
,
asoc
->
ep
,
asoc
,
transport
,
GFP_ATOMIC
);
case
SCTP_CMD_TIMER_START
:
if
(
error
)
timer
=
&
asoc
->
timers
[
cmd
->
obj
.
to
];
asoc
->
base
.
sk
->
err
=
-
error
;
timeout
=
asoc
->
timeouts
[
cmd
->
obj
.
to
];
if
(
!
timeout
)
BUG
();
timer
->
expires
=
jiffies
+
timeout
;
out_unlock:
sctp_association_hold
(
asoc
);
sctp_bh_unlock_sock
(
asoc
->
base
.
sk
);
add_timer
(
timer
);
sctp_transport_put
(
transport
);
break
;
}
case
SCTP_CMD_TIMER_RESTART
:
/* Inject a SACK Timeout event into the state machine. */
timer
=
&
asoc
->
timers
[
cmd
->
obj
.
to
];
void
sctp_generate_sack_event
(
unsigned
long
data
)
timeout
=
asoc
->
timeouts
[
cmd
->
obj
.
to
];
{
if
(
!
mod_timer
(
timer
,
jiffies
+
timeout
))
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
sctp_association_hold
(
asoc
);
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_SACK
);
break
;
}
case
SCTP_CMD_TIMER_STOP
:
sctp_timer_event_t
*
sctp_timer_events
[
SCTP_NUM_TIMEOUT_TYPES
]
=
{
timer
=
&
asoc
->
timers
[
cmd
->
obj
.
to
];
NULL
,
if
(
timer_pending
(
timer
)
&&
del_timer
(
timer
))
sctp_generate_t1_cookie_event
,
sctp_association_put
(
asoc
);
sctp_generate_t1_init_event
,
break
;
sctp_generate_t2_shutdown_event
,
NULL
,
sctp_generate_t5_shutdown_guard_event
,
sctp_generate_heartbeat_event
,
sctp_generate_sack_event
,
sctp_generate_autoclose_event
,
};
case
SCTP_CMD_INIT_RESTART
:
/* Do the needed accounting and updates
/* RFC 2960 8.2 Path Failure Detection
* associated with restarting an initialization
*
* timer.
* When its peer endpoint is multi-homed, an endpoint should keep a
* error counter for each of the destination transport addresses of the
* peer endpoint.
*
* Each time the T3-rtx timer expires on any address, or when a
* HEARTBEAT sent to an idle address is not acknowledged within a RTO,
* the error counter of that destination address will be incremented.
* When the value in the error counter exceeds the protocol parameter
* 'Path.Max.Retrans' of that destination address, the endpoint should
* mark the destination transport address as inactive, and a
* notification SHOULD be sent to the upper layer.
*
*/
*/
asoc
->
counters
[
SCTP_COUNTER_INIT_ERROR
]
++
;
static
void
sctp_do_8_2_transport_strike
(
sctp_association_t
*
asoc
,
asoc
->
timeouts
[
cmd
->
obj
.
to
]
*=
2
;
struct
sctp_transport
*
transport
)
if
(
asoc
->
timeouts
[
cmd
->
obj
.
to
]
>
{
asoc
->
max_init_timeo
)
{
/* The check for association's overall error counter exceeding the
asoc
->
timeouts
[
cmd
->
obj
.
to
]
=
* threshold is done in the state function.
asoc
->
max_init_timeo
;
*/
asoc
->
overall_error_count
++
;
if
(
transport
->
active
&&
(
transport
->
error_count
++
>=
transport
->
error_threshold
))
{
SCTP_DEBUG_PRINTK
(
"transport_strike: transport "
"IP:%d.%d.%d.%d failed.
\n
"
,
NIPQUAD
(
transport
->
ipaddr
.
v4
.
sin_addr
));
sctp_assoc_control_transport
(
asoc
,
transport
,
SCTP_TRANSPORT_DOWN
,
SCTP_FAILED_THRESHOLD
);
}
}
/* If we've sent any data bundled with
/* E2) For the destination address for which the timer
* COOKIE-ECHO we need to resend.
* expires, set RTO <- RTO * 2 ("back off the timer"). The
* maximum value discussed in rule C7 above (RTO.max) may be
* used to provide an upper bound to this doubling operation.
*/
*/
list_for_each
(
pos
,
&
asoc
->
peer
.
transport_addr_list
)
{
transport
->
rto
=
min
((
transport
->
rto
*
2
),
transport
->
asoc
->
rto_max
);
t
=
list_entry
(
pos
,
struct
sctp_transport
,
}
transports
);
sctp_retransmit_mark
(
&
asoc
->
outqueue
,
t
,
0
);
}
sctp_add_cmd_sf
(
commands
,
/* Worker routine to handle INIT command failure. */
SCTP_CMD_TIMER_RESTART
,
static
void
sctp_cmd_init_failed
(
sctp_cmd_seq_t
*
commands
,
SCTP_TO
(
cmd
->
obj
.
to
));
sctp_association_t
*
asoc
)
break
;
{
struct
sctp_ulpevent
*
event
;
case
SCTP_CMD_INIT_FAILED
:
event
=
sctp_ulpevent_make_assoc_change
(
asoc
,
0
,
SCTP_CANT_STR_ASSOC
,
sctp_cmd_init_failed
(
commands
,
asoc
);
0
,
0
,
0
,
GFP_ATOMIC
);
break
;
case
SCTP_CMD_ASSOC_FAILED
:
if
(
event
)
sctp_cmd_assoc_failed
(
commands
,
asoc
,
event_type
,
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_EVENT_ULP
,
subtype
,
chunk
);
SCTP_ULPEVENT
(
event
));
break
;
case
SCTP_CMD_COUNTER_INC
:
/* FIXME: We need to handle data possibly either
asoc
->
counters
[
cmd
->
obj
.
counter
]
++
;
* sent via COOKIE-ECHO bundling or just waiting in
break
;
* the transmit queue, if the user has enabled
* SEND_FAILED notifications.
*/
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_DELETE_TCB
,
SCTP_NULL
());
}
case
SCTP_CMD_COUNTER_RESET
:
/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  sctp_association_t *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  sctp_chunk_t *chunk)
{
	struct sctp_ulpevent *event;
	__u16 error = 0;

	switch (event_type) {
	case SCTP_EVENT_T_PRIMITIVE:
		if (SCTP_PRIMITIVE_ABORT == subtype.primitive)
			error = SCTP_ERROR_USER_ABORT;
		break;
	case SCTP_EVENT_T_CHUNK:
		if (chunk && (SCTP_CID_ABORT == chunk->chunk_hdr->type) &&
		    (ntohs(chunk->chunk_hdr->length) >=
		     (sizeof(struct sctp_chunkhdr) +
		      sizeof(struct sctp_errhdr)))) {
			error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
		}
		break;
	default:
		break;
	}

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						error, 0, 0, GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* FIXME: We need to handle data that could not be sent or was not
	 * acked, if the user has enabled SEND_FAILED notifications.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}
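/* Illustrative sketch (user-space view, not kernel code): the ulpevent
 * queued above surfaces to the application as an SCTP_ASSOC_CHANGE
 * notification with sac_state == SCTP_COMM_LOST, assuming the draft
 * SCTP sockets API of the same era and that the application subscribed
 * to association-change events.  A minimal reader:
 */
#include <netinet/sctp.h>
#include <sys/socket.h>

static void example_handle_notification(int sd)
{
	char buf[8192];
	struct msghdr msg = { 0 };
	struct iovec iov = { buf, sizeof(buf) };
	union sctp_notification *sn;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	if (recvmsg(sd, &msg, 0) <= 0)
		return;
	if (!(msg.msg_flags & MSG_NOTIFICATION))
		return;	/* Ordinary data, not an event. */

	sn = (union sctp_notification *)buf;
	if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE &&
	    sn->sn_assoc_change.sac_state == SCTP_COMM_LOST)
		/* The association failed; sac_error carries the cause. */ ;
}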
/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 sctp_association_t *asoc,
				 sctp_chunk_t *chunk,
				 sctp_init_chunk_t *peer_init,
				 int priority)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
			       sctp_source(chunk), peer_init, priority))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}
/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);

		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Stop all heartbeat timers. */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}
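/* Illustrative sketch (not kernel code): the reference-counting
 * discipline the two helpers above rely on.  mod_timer() returns 0 when
 * the timer was *not* already pending, i.e. this call armed an idle
 * timer, so the pending callback needs its own reference; del_timer()
 * returns nonzero only when it actually removed a pending timer, so the
 * matching put happens exactly once.  "example_obj", "obj_hold" and
 * "obj_put" are hypothetical stand-ins, not kernel symbols.
 */
struct example_obj {
	struct timer_list timer;
	atomic_t refcnt;
};

extern void obj_hold(struct example_obj *obj);
extern void obj_put(struct example_obj *obj);

static void example_arm(struct example_obj *obj, unsigned long expires)
{
	if (!mod_timer(&obj->timer, expires))
		obj_hold(obj);	/* Timer was idle: callback needs a ref. */
}

static void example_disarm(struct example_obj *obj)
{
	if (del_timer(&obj->timer))
		obj_put(obj);	/* Removed a pending timer: drop its ref. */
}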
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer.  */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */
	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}
/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
					   __u32 lowest_tsn,
					   sctp_chunk_t *chunk)
{
	sctp_chunk_t *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to handle the reception of an HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  sctp_association_t *asoc,
				  struct sctp_transport *t,
				  sctp_chunk_t *chunk)
{
	sctp_sender_hb_info_t *hbinfo;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 * The association's overall error count is also cleared.
	 */
	t->error_count = 0;
	t->asoc->overall_error_count = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if (!t->active)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 */
	hbinfo = (sctp_sender_hb_info_t *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
}
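/* Illustrative sketch (not the kernel implementation): RFC 2960 6.3.1
 * turns a round-trip sample like the one measured above into
 * SRTT/RTTVAR/RTO:
 *
 *   RTTVAR = (1 - beta)  * RTTVAR + beta  * |SRTT - R'|
 *   SRTT   = (1 - alpha) * SRTT   + alpha * R'
 *   RTO    = SRTT + 4 * RTTVAR, clamped to [RTO.Min, RTO.Max]
 *
 * with alpha = 1/8 and beta = 1/4.  First-measurement initialization is
 * omitted, and all names below are local to the example.
 */
struct example_rto_state {
	unsigned long srtt, rttvar, rto;
};

static void example_update_rto(struct example_rto_state *s,
			       unsigned long rtt,
			       unsigned long rto_min, unsigned long rto_max)
{
	unsigned long delta;

	delta = (s->srtt > rtt) ? s->srtt - rtt : rtt - s->srtt;
	s->rttvar = s->rttvar - (s->rttvar >> 2) + (delta >> 2);
	s->srtt = s->srtt - (s->srtt >> 3) + (rtt >> 3);

	s->rto = s->srtt + 4 * s->rttvar;
	if (s->rto < rto_min)
		s->rto = rto_min;
	if (s->rto > rto_max)
		s->rto = rto_max;
}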
/* Helper function to do a transport reset at the expiry of the heartbeat
 * timer.
 */
static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);

	/* Mark one strike against a transport. */
	sctp_do_8_2_transport_strike(asoc, t);
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(sctp_association_t *asoc, __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet.
	 */
	asoc->need_ecne = 0;
}
/* Helper function to process the SACK command.  */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 sctp_association_t *asoc,
				 sctp_sackhdr_t *sackh)
{
	int err;

	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	} else {
		/* Windows may have opened, so we need
		 * to check if we have DATA to transmit.
		 */
		err = sctp_outq_flush(&asoc->outqueue, 0);
	}

	return err;
}
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			      sctp_chunk_t *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_shutdown_transport(asoc);
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
	chunk->transport = t;
}
/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_opt *sp = sctp_sk(sk);

	asoc->state = state;
	asoc->state_timestamp = jiffies;

	if ((SCTP_STATE_ESTABLISHED == asoc->state) ||
	    (SCTP_STATE_CLOSED == asoc->state)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (SCTP_SOCKET_UDP != sp->type)
			sk->state_change(sk);
	}

	/* Change the sk->state of a TCP-style socket that has successfully
	 * completed a connect() call.
	 */
	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
	    (SCTP_SOCKET_TCP == sp->type) && (SCTP_SS_CLOSED == sk->state))
		sk->state = SCTP_SS_ESTABLISHED;
}

/* This macro is to compress the text a bit...  */
#define AP(v) asoc->peer.v

/* Generate SACK if necessary.  We call this at the end of a packet.  */
int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
{
	__u32 ctsn, max_tsn_seen;
	sctp_chunk_t *sack;
	int error = 0;

	if (force)
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 * 	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *           : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		/* We will need a SACK for the next packet.  */
		asoc->peer.sack_needed = 1;
		goto out;
	} else {
		if (asoc->a_rwnd > asoc->rwnd)
			asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack)
			goto nomem;

		asoc->peer.sack_needed = 0;
		error = sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}
out:
	return error;
nomem:
	error = -ENOMEM;
	return error;
}
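/* Illustrative sketch (not kernel code): the decision sctp_gen_sack()
 * encodes above.  A SACK goes out now if it was forced, if TSNs arrived
 * out of order, or if this is the second packet since the last SACK;
 * otherwise the (200 ms) delayed-SACK timer covers this packet.  All
 * names here are hypothetical stand-ins.
 */
static int example_should_sack_now(int force, int out_of_order,
				   int *sack_needed)
{
	if (force || out_of_order)
		*sack_needed = 1;

	if (!*sack_needed) {
		*sack_needed = 1;	/* SACK the *next* packet. */
		return 0;
	}
	*sack_needed = 0;		/* SACK this packet now. */
	return 1;
}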
/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define DEBUG_PRE \
	SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
			  "ep %p, %s, %s, asoc %p[%s], %s\n", \
			  ep, sctp_evttype_tbl[event_type], \
			  (*debug_fn)(subtype), asoc, \
			  sctp_state_tbl[state], state_fn->name)

#define DEBUG_POST \
	SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
			  "asoc %p, status: %s\n", \
			  asoc, sctp_status_tbl[status])

#define DEBUG_POST_SFX \
	SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
			  error, asoc, \
			  sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
			  sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state, sctp_endpoint_t *ep,
	       sctp_association_t *asoc, void *event_arg, int priority)
{
	sctp_cmd_seq_t commands;
	sctp_sm_table_entry_t *state_fn;
	sctp_disposition_t status;
	int error = 0;
	typedef const char *(printfn_t)(sctp_subtype_t);

	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	DEBUG_PRE;
	status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
	DEBUG_POST;

	error = sctp_side_effects(event_type, subtype, state,
				  ep, asoc, event_arg,
				  status, &commands,
				  priority);
	DEBUG_POST_SFX;

	return error;
}

#undef AP
#undef DEBUG_PRE
#undef DEBUG_POST
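/* Illustrative sketch (not kernel code): the lookup/run/interpret shape
 * of sctp_do_sm() reduced to a skeleton.  A table keyed by event type
 * and state yields a handler; the handler only *queues* side effects,
 * and a separate interpreter applies them afterwards.  Every name here
 * is hypothetical.
 */
enum ex_event { EX_EV_CHUNK, EX_EV_TIMEOUT, EX_EV_MAX };
enum ex_state { EX_ST_CLOSED, EX_ST_ESTABLISHED, EX_ST_MAX };

struct ex_cmd_seq;				/* Queue of side effects. */
typedef int (ex_state_fn)(void *arg, struct ex_cmd_seq *cmds);

extern ex_state_fn *ex_table[EX_EV_MAX][EX_ST_MAX];
extern int ex_interpret(struct ex_cmd_seq *cmds, int status);

static int ex_do_sm(enum ex_event ev, enum ex_state st, void *arg,
		    struct ex_cmd_seq *cmds)
{
	/* 1. Look up and run the state function; it queues commands. */
	int status = ex_table[ev][st](arg, cmds);

	/* 2. Apply the queued side effects under the given disposition. */
	return ex_interpret(cmds, status);
}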
/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
		      sctp_state_t state,
		      sctp_endpoint_t *ep, sctp_association_t *asoc,
		      void *event_arg,
		      sctp_disposition_t status, sctp_cmd_seq_t *commands,
		      int priority)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, asoc, event_arg, status,
					       commands, priority)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
				  "event_type %d, event_id %d\n",
				  state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
		/* This should now be a command. */
		break;

	case SCTP_DISPOSITION_CONSUME:
	case SCTP_DISPOSITION_ABORT:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		printk(KERN_ERR "sctp protocol violation state %d "
		       "chunkid %d\n", state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		printk(KERN_ERR "sctp bug in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		printk(KERN_ERR "sctp impossible disposition %d "
		       "in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	};

bail:
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	sctp_association_t *asoc = transport->asoc;

	/* Check whether a task is in the sock.  */
	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this transport really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (transport->dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}
/* This is a sa interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(sctp_association_t *asoc,
					sctp_event_timeout_t timeout_type)
{
	int error = 0;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
				  __FUNCTION__,
				  timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_association_put(asoc);
}

void sctp_generate_t1_cookie_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

void sctp_generate_t1_init_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

void sctp_generate_t2_shutdown_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */
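/* Illustrative sketch (not kernel code): the shape shared by the
 * sctp_generate_*_event() timer callbacks.  If user context currently
 * owns the socket, the work cannot run safely from the bottom half, so
 * the callback re-arms itself HZ/20 jiffies (50 ms) ahead and retries;
 * otherwise it checks the "dead" flag and runs the state machine.  This
 * reuses the hypothetical example_obj from the earlier sketch, here
 * assumed to also offer a socket-busy test and a "dead" flag.
 */
extern int example_owned_by_user(struct example_obj *obj);
extern int example_dead(struct example_obj *obj);
extern void example_run_state_machine(struct example_obj *obj);
extern void example_lock_bh(struct example_obj *obj);
extern void example_unlock_bh(struct example_obj *obj);

static void example_timer_cb(unsigned long data)
{
	struct example_obj *obj = (struct example_obj *)data;

	example_lock_bh(obj);
	if (example_owned_by_user(obj)) {
		/* Busy: retry shortly; take a ref if the timer was idle. */
		if (!mod_timer(&obj->timer, jiffies + (HZ / 20)))
			obj_hold(obj);
		goto out;
	}
	if (!example_dead(obj))
		example_run_state_machine(obj);
out:
	example_unlock_bh(obj);
	obj_put(obj);	/* Drop the reference this expiry consumed. */
}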
void sctp_generate_autoclose_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	sctp_association_t *asoc = transport->asoc;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (transport->dead)
		goto out_unlock;

	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
void sctp_generate_sack_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t5_shutdown_guard_event,
	sctp_generate_heartbeat_event,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};
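/* Sketch of how this table is consumed.  From memory of the companion
 * code in net/sctp/associola.c (simplified, so treat the details as an
 * assumption rather than a quote): association setup points each
 * per-association timer at the matching generator and passes the
 * association as the callback argument.
 */
static void example_init_assoc_timers(sctp_association_t *asoc)
{
	int i;

	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		init_timer(&asoc->timers[i]);
		if (sctp_timer_events[i])
			asoc->timers[i].function = sctp_timer_events[i];
		asoc->timers[i].data = (unsigned long)asoc;
	}
}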
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter.  */
int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			 sctp_state_t state, sctp_endpoint_t *ep,
			 sctp_association_t *asoc, void *event_arg,
			 sctp_disposition_t status, sctp_cmd_seq_t *commands,
			 int priority)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	sctp_chunk_t *new_obj;
	sctp_chunk_t *chunk = NULL;
	struct sctp_packet *packet;
	struct list_head *pos;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	sctp_sackhdr_t sackh;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = (sctp_chunk_t *) event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 *     --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			asoc = cmd->obj.ptr;
			/* Register with the endpoint.  */
			sctp_endpoint_add_asoc(ep, asoc);
			sctp_hash_established(asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			/* Delete the current association.  */
			sctp_unhash_established(asoc);
			sctp_association_free(asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.ptr);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.ptr, priority);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.ptr)
					sctp_free_chunk(cmd->obj.ptr);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.ptr)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.ptr));
			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "chunk_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "event_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
			break;

		case SCTP_CMD_REPLY:
			/* Send a chunk to our peer.  */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.ptr;
			sctp_packet_transmit(packet);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_TRANSMIT:
			/* Kick start transmission. */
			error = sctp_outq_flush(&asoc->outqueue, 0);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.   */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!timeout)
				BUG();

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (timer_pending(timer) && del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.
			 */
			asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
			asoc->timeouts[cmd->obj.to] *= 2;
			if (asoc->timeouts[cmd->obj.to] >
			    asoc->max_init_timeo) {
				asoc->timeouts[cmd->obj.to] =
					asoc->max_init_timeo;
			}

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each(pos, &asoc->peer.transport_addr_list) {
				t = list_entry(pos, struct sctp_transport,
					       transports);
				sctp_retransmit_mark(&asoc->outqueue, t, 0);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(cmd->obj.to));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk);
			break;

		case SCTP_CMD_COUNTER_INC:
			asoc->counters[cmd->obj.counter]++;
			break;

		case SCTP_CMD_COUNTER_RESET:
			asoc->counters[cmd->obj.counter] = 0;
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			SCTP_DEBUG_PRINTK("vtag mismatch!\n");
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport.  */
			sctp_do_8_2_transport_strike(asoc,
						     cmd->obj.transport);
			break;

		case SCTP_CMD_TRANSPORT_RESET:
			t = cmd->obj.transport;
			sctp_cmd_transport_reset(commands, asoc, t);
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, asoc, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.u32;
			sackh.a_rwnd = 0;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_SACKH(&sackh));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.  */
			chunk->pdiscard = 1;
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
						   GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
					 GFP_ATOMIC);
			break;

		default:
			printk(KERN_WARNING "Impossible command: %u, %p\n",
			       cmd->verb, cmd->obj.ptr);
			break;
		};

		if (error)
			return error;
	}

	return error;

nomem:
	error = -ENOMEM;
	return error;
}
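/* Sketch of a typical entry into the machine (the inbound-chunk path;
 * simplified from memory, so the surrounding plumbing is an assumption):
 * each received chunk becomes an SCTP_EVENT_T_CHUNK event keyed by its
 * chunk type and the association's current state, and sctp_do_sm()
 * takes it from there.
 */
static int example_deliver_chunk(sctp_endpoint_t *ep,
				 sctp_association_t *asoc,
				 sctp_chunk_t *chunk)
{
	return sctp_do_sm(SCTP_EVENT_T_CHUNK,
			  SCTP_ST_CHUNK(chunk->chunk_hdr->type),
			  asoc->state, ep, asoc, chunk, GFP_ATOMIC);
}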