Commit a33b4399, authored Mar 24, 2003 by Jon Grimm
[SCTP] Add icmpv6 handler to SCTP.
parent d6b6fece
Showing 6 changed files with 1131 additions and 1115 deletions (+1131 −1115)
include/net/sctp/constants.h   +3    -5
include/net/sctp/sctp.h        +19   -19
net/sctp/input.c               +110  -64
net/sctp/ipv6.c                +63   -18
net/sctp/outqueue.c            +5    -4
net/sctp/sm_sideeffect.c       +931  -1005
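In outline, the commit splits the ICMP error path into a family-agnostic lookup (sctp_err_lookup) and a common cleanup (sctp_err_finish), which sctp_v4_err now calls and which the new sctp_v6_err reuses. A minimal userspace sketch of that call shape; struct assoc, err_lookup and err_finish here are illustrative stand-ins, not the kernel API:

#include <stdio.h>

struct assoc { int vtag; };

/* Stand-in for sctp_err_lookup(): find the association the ICMP error
 * refers to, independent of address family.  The real function also
 * takes the sock lock and reference counts; this toy only models the
 * shape of the API.
 */
static struct assoc *err_lookup(int family, int vtag)
{
	static struct assoc a = { 42 };

	(void)family;		/* v4 and v6 share one lookup path */
	return (vtag == a.vtag) ? &a : NULL;
}

/* Stand-in for sctp_err_finish(): common unlock/unref cleanup. */
static void err_finish(struct assoc *a)
{
	(void)a;
}

static void v4_err(int vtag)
{
	struct assoc *a = err_lookup(4, vtag);
	if (a)
		err_finish(a);
}

static void v6_err(int vtag)
{
	struct assoc *a = err_lookup(6, vtag);
	if (a)
		err_finish(a);
}

int main(void)
{
	v4_err(42);
	v6_err(42);
	printf("both address families share one lookup/cleanup pair\n");
	return 0;
}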
include/net/sctp/constants.h
@@ -138,12 +138,10 @@ typedef enum {
  */
 typedef union {
 	sctp_cid_t chunk;
 	sctp_event_timeout_t timeout;
 	sctp_event_other_t other;
 	sctp_event_primitive_t primitive;
 } sctp_subtype_t;

 #define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \

@@ -421,9 +419,9 @@ typedef enum {
 /* Reasons to retransmit. */
 typedef enum {
-	SCTP_RETRANSMIT_T3_RTX,
-	SCTP_RETRANSMIT_FAST_RTX,
-	SCTP_RETRANSMIT_PMTU_DISCOVERY,
+	SCTP_RTXR_T3_RTX,
+	SCTP_RTXR_FAST_RTX,
+	SCTP_RTXR_PMTUD,
 } sctp_retransmit_reason_t;

 /* Reasons to lower cwnd. */
include/net/sctp/sctp.h
@@ -130,7 +130,7 @@ extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);

 /*
- * sctp_socket.c
+ * sctp/socket.c
  */
 extern int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 extern int sctp_inet_listen(struct socket *sock, int backlog);

@@ -139,7 +139,7 @@ extern unsigned int sctp_poll(struct file *file, struct socket *sock,
			      poll_table *wait);

 /*
- * sctp_primitive.c
+ * sctp/primitive.c
  */
 extern int sctp_primitive_ASSOCIATE(sctp_association_t *, void *arg);
 extern int sctp_primitive_SHUTDOWN(sctp_association_t *, void *arg);

@@ -148,14 +148,14 @@ extern int sctp_primitive_SEND(sctp_association_t *, void *arg);
 extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);

 /*
- * sctp_crc32c.c
+ * sctp/crc32c.c
  */
 extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
 extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
 extern __u32 sctp_end_cksum(__u32 cksum);

 /*
- * sctp_input.c
+ * sctp/input.c
  */
 extern int sctp_rcv(struct sk_buff *skb);
 extern void sctp_v4_err(struct sk_buff *skb, u32 info);

@@ -170,9 +170,16 @@ extern void __sctp_unhash_endpoint(sctp_endpoint_t *);
 extern sctp_association_t *__sctp_lookup_association(const union sctp_addr *,
						     const union sctp_addr *,
						     struct sctp_transport **);
+extern struct sock *sctp_err_lookup(int family, struct sk_buff *,
+				    struct sctphdr *, struct sctp_endpoint **,
+				    struct sctp_association **,
+				    struct sctp_transport **);
+extern void sctp_err_finish(struct sock *, struct sctp_endpoint *,
+			    struct sctp_association *);
+extern void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+				  struct sctp_transport *t, __u32 pmtu);

 /*
- * sctp_hashdriver.c
+ * sctp/hashdriver.c
  */
 extern void sctp_hash_digest(const char *secret, const int secret_len,
			     const char *text, const int text_len,

@@ -184,9 +191,7 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
 #ifdef TEST_FRAME
 #include <test_frame.h>
 #else

 /* spin lock wrappers. */

@@ -312,7 +317,6 @@ static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
 #endif

 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))

@@ -320,19 +324,15 @@ static inline void sctp_sysctl_unregister(void) { return; }
 extern int sctp_v6_init(void);
 extern void sctp_v6_exit(void);

 static inline int sctp_ipv6_addr_type(const struct in6_addr *addr)
 {
	return ipv6_addr_type((struct in6_addr *)addr);
 }

+extern void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+			int type, int code, int offset, __u32 info);
+
-#else /* #ifdef defined(CONFIG_IPV6)
-       || defined(CONFIG_IPV6_MODULE) */
+#else /* #ifdef defined(CONFIG_IPV6) */

 #define sctp_ipv6_addr_type(a) 0
 static inline int sctp_v6_init(void) { return 0; }
 static inline void sctp_v6_exit(void) { return; }

-#endif /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* #if defined(CONFIG_IPV6) */

 /* Map an association to an assoc_id. */
 static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)

@@ -546,7 +546,7 @@ struct sctp_sock {
	struct sock sk;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct ipv6_pinfo *pinet6;
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
	struct inet_opt inet;
	struct sctp_opt sctp;
 };

@@ -559,7 +559,7 @@ struct sctp6_sock {
	struct sctp_opt sctp;
	struct ipv6_pinfo inet6;
 };
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */

 #define sctp_sk(__sk) (&((struct sctp_sock *)__sk)->sctp)
net/sctp/input.c
@@ -205,21 +205,19 @@ int sctp_rcv(struct sk_buff *skb)
	 */
	sctp_bh_lock_sock(sk);

-	if (sock_owned_by_user(sk)) {
+	if (sock_owned_by_user(sk))
		sk_add_backlog(sk, (struct sk_buff *) chunk);
-	} else {
+	else
		sctp_backlog_rcv(sk, (struct sk_buff *) chunk);
-	}

	/* Release the sock and any reference counts we took in the
	 * lookup calls.
	 */
	sctp_bh_unlock_sock(sk);
-	if (asoc) {
+	if (asoc)
		sctp_association_put(asoc);
-	} else {
+	else
		sctp_endpoint_put(ep);
-	}
	sock_put(sk);
	return ret;

@@ -266,10 +264,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 }

 /* Handle icmp frag needed error. */
-static inline void sctp_icmp_frag_needed(struct sock *sk,
-					 sctp_association_t *asoc,
-					 struct sctp_transport *transport,
-					 __u32 pmtu)
+void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
+			   struct sctp_transport *t, __u32 pmtu)
 {
	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		printk(KERN_WARNING "%s: Reported pmtu %d too low, "

@@ -278,54 +274,38 @@ static inline void sctp_icmp_frag_needed(struct sock *sk,
		pmtu = SCTP_DEFAULT_MINSEGMENT;
	}

-	if (!sock_owned_by_user(sk) && transport && (transport->pmtu != pmtu)) {
-		transport->pmtu = pmtu;
+	if (!sock_owned_by_user(sk) && t && (t->pmtu != pmtu)) {
+		t->pmtu = pmtu;
		sctp_assoc_sync_pmtu(asoc);
-		sctp_retransmit(&asoc->outqueue, transport,
-				SCTP_RETRANSMIT_PMTU_DISCOVERY);
+		sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
	}
 }
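The frag-needed handler clamps the reported PMTU to a floor and only acts when the value actually changed and user context does not own the socket. A userspace model of that decision; the 512-byte floor mirrors SCTP_DEFAULT_MINSEGMENT as this era's source appears to define it (an assumption here, not verified against the header):

#include <stdio.h>

#define DEFAULT_MINSEGMENT 512	/* assumed value of SCTP_DEFAULT_MINSEGMENT */

static unsigned int transport_pmtu = 1500;

static void frag_needed(unsigned int pmtu)
{
	/* Reject bogus ICMP that advertises an impossibly small path MTU. */
	if (pmtu < DEFAULT_MINSEGMENT) {
		printf("reported pmtu %u too low, using default minimum\n",
		       pmtu);
		pmtu = DEFAULT_MINSEGMENT;
	}
	/* Only sync and retransmit when the cached value really changed. */
	if (transport_pmtu != pmtu) {
		transport_pmtu = pmtu;
		printf("pmtu synced to %u; retransmit with SCTP_RTXR_PMTUD\n",
		       pmtu);
	}
}

int main(void)
{
	frag_needed(1400);	/* normal PMTU reduction */
	frag_needed(68);	/* bogus ICMP: clamped to the floor */
	return 0;
}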
-/*
- * This routine is called by the ICMP module when it gets some
- * sort of error condition.  If err < 0 then the socket should
- * be closed and the error returned to the user.  If err > 0
- * it's just the icmp type << 8 | icmp code.  After adjustment
- * header points to the first 8 bytes of the sctp header.  We need
- * to find the appropriate port.
- *
- * The locking strategy used here is very "optimistic". When
- * someone else accesses the socket the ICMP is just dropped
- * and for some paths there is no check at all.
- * A more general error queue to queue errors for later handling
- * is probably better.
- *
- */
-void sctp_v4_err(struct sk_buff *skb, __u32 info)
+/* Common lookup code for icmp/icmpv6 error handler. */
+struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+			     struct sctphdr *sctphdr,
+			     struct sctp_endpoint **epp,
+			     struct sctp_association **app,
+			     struct sctp_transport **tpp)
 {
-	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
-	int type = skb->h.icmph->type;
-	int code = skb->h.icmph->code;
-	union sctp_addr saddr, daddr;
+	union sctp_addr saddr;
+	union sctp_addr daddr;
+	struct sctp_af *af;
	struct inet_opt *inet;
	struct sock *sk = NULL;
-	sctp_endpoint_t *ep = NULL;
-	sctp_association_t *asoc = NULL;
-	struct sctp_transport *transport;
-	int err;
+	struct sctp_endpoint *ep = NULL;
+	struct sctp_association *asoc = NULL;
+	struct sctp_transport *transport = NULL;

-	if (skb->len < ((iph->ihl << 2) + 8)) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
-	}
+	*app = NULL;
+	*epp = NULL;
+	*tpp = NULL;
+
+	af = sctp_get_af_specific(family);
+	if (unlikely(!af)) {
+		return NULL;
+	}

-	saddr.v4.sin_family = AF_INET;
-	saddr.v4.sin_port = ntohs(sh->source);
-	memcpy(&saddr.v4.sin_addr.s_addr, &iph->saddr, sizeof(struct in_addr));
-	daddr.v4.sin_family = AF_INET;
-	daddr.v4.sin_port = ntohs(sh->dest);
-	memcpy(&daddr.v4.sin_addr.s_addr, &iph->daddr, sizeof(struct in_addr));
+	/* Initialize local addresses for lookups. */
+	af->from_skb(&saddr, skb, 1);
+	af->from_skb(&daddr, skb, 0);

	/* Look for an association that matches the incoming ICMP error
	 * packet.
@@ -338,13 +318,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
	 */
	ep = __sctp_rcv_lookup_endpoint(&daddr);
	if (!ep) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
+		return NULL;
		}
	}

	if (asoc) {
-		if (ntohl(sh->vtag) != asoc->c.peer_vtag) {
+		if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
			ICMP_INC_STATS_BH(IcmpInErrors);
			goto out;
		}
@@ -353,12 +332,90 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
	sk = ep->base.sk;
	sctp_bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LockDroppedIcmps);

+	*epp = ep;
+	*app = asoc;
+	*tpp = transport;
+	return sk;
+
+out:
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+	return NULL;
+}
+
+/* Common cleanup code for icmp/icmpv6 error handler. */
+void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep,
+		     struct sctp_association *asoc)
+{
+	sctp_bh_unlock_sock(sk);
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+}
+
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition.  If err < 0 then the socket should
+ * be closed and the error returned to the user.  If err > 0
+ * it's just the icmp type << 8 | icmp code.  After adjustment
+ * header points to the first 8 bytes of the sctp header.  We need
+ * to find the appropriate port.
+ *
+ * The locking strategy used here is very "optimistic". When
+ * someone else accesses the socket the ICMP is just dropped
+ * and for some paths there is no check at all.
+ * A more general error queue to queue errors for later handling
+ * is probably better.
+ *
+ */
+void sctp_v4_err(struct sk_buff *skb, __u32 info)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
+	int type = skb->h.icmph->type;
+	int code = skb->h.icmph->code;
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct inet_opt *inet;
+	char *saveip, *savesctp;
+	int err;
+
+	if (skb->len < ((iph->ihl << 2) + 8)) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.iph = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
+	/* Put back, the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+	/* Warning:  The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */

	switch (type) {
	case ICMP_PARAMETERPROB:
		err = EPROTO;

@@ -397,13 +454,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
	}

 out_unlock:
-	sctp_bh_unlock_sock(sk);
-out:
-	sock_put(sk);
-	if (asoc)
-		sctp_association_put(asoc);
-	if (ep)
-		sctp_endpoint_put(ep);
+	sctp_err_finish(sk, ep, asoc);
 }

 /*

@@ -780,8 +831,3 @@ sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
	return asoc;
 }
net/sctp/ipv6.c
 /* SCTP kernel reference Implementation
  * Copyright (c) 2001 Nokia, Inc.
  * Copyright (c) 2001 La Monte H.P. Yarroll
- * Copyright (c) 2002 International Business Machines, Corp.
+ * Copyright (c) 2002-2003 International Business Machines, Corp.
  *
  * This file is part of the SCTP kernel reference Implementation
  *

@@ -88,17 +88,62 @@ extern struct notifier_block sctp_inetaddr_notifier;
	 ntohs((addr)->s6_addr16[6]), \
	 ntohs((addr)->s6_addr16[7])

-/* FIXME: Comments. */
-static inline void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			       int type, int code, int offset, __u32 info)
+/* ICMP error handler. */
+void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+		 int type, int code, int offset, __u32 info)
 {
-	/* BUG.  WRITE ME. */
+	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct ipv6_pinfo *np;
+	char *saveip, *savesctp;
+	int err;
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.ipv6h = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET6, skb, sh, &ep, &asoc, &transport);
+	/* Put back, the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP6_INC_STATS_BH(Icmp6InErrors);
+		return;
+	}
+
+	/* Warning:  The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */
+
+	switch (type) {
+	case ICMPV6_PKT_TOOBIG:
+		sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
+		goto out_unlock;
+	default:
+		break;
+	}
+
+	np = inet6_sk(sk);
+	icmpv6_err_convert(type, code, &err);
+	if (!sock_owned_by_user(sk) && np->recverr) {
+		sk->err = err;
+		sk->error_report(sk);
+	} else {	/* Only an error on timeout */
+		sk->err_soft = err;
+	}
+
+out_unlock:
+	sctp_err_finish(sk, ep, asoc);
 }
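The new handler treats ICMPV6_PKT_TOOBIG as a PMTU event and converts every other ICMPv6 type to an errno via icmpv6_err_convert(), reporting it to the application immediately only when it asked for errors (np->recverr), otherwise recording a soft error. A toy dispatch of that shape; PKT_TOOBIG and UNREACH below are illustrative stand-ins, not the kernel's constant values:

#include <stdio.h>

enum { UNREACH = 1, PKT_TOOBIG = 2 };	/* stand-in constants */

static void v6_err_model(int type, unsigned int info, int recverr)
{
	switch (type) {
	case PKT_TOOBIG:
		/* corresponds to the sctp_icmp_frag_needed() branch */
		printf("PMTU event: new mtu %u\n", info);
		return;
	default:
		break;
	}
	if (recverr)
		printf("type %d: report error to application now\n", type);
	else
		printf("type %d: record soft error only\n", type);
}

int main(void)
{
	v6_err_model(PKT_TOOBIG, 1280, 0);
	v6_err_model(UNREACH, 0, 1);
	return 0;
}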
 /* Based on tcp_v6_xmit() in tcp_ipv6.c. */
-static inline int sctp_v6_xmit(struct sk_buff *skb,
-			       struct sctp_transport *transport, int ipfragok)
+static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
+			int ipfragok)
 {
	struct sock *sk = skb->sk;
	struct ipv6_pinfo *np = inet6_sk(sk);
net/sctp/outqueue.c
@@ -357,7 +357,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
	__u8 fast_retransmit = 0;

	switch (reason) {
-	case SCTP_RETRANSMIT_T3_RTX:
+	case SCTP_RTXR_T3_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.

@@ -365,10 +365,11 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		break;
-	case SCTP_RETRANSMIT_FAST_RTX:
+	case SCTP_RTXR_FAST_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		fast_retransmit = 1;
		break;
+	case SCTP_RTXR_PMTUD:
	default:
		break;
	}

@@ -876,7 +877,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
	start_timer = 0;
	queue = &q->out;

-	while (chunk = sctp_outq_dequeue_data(q)) {
+	while ((chunk = sctp_outq_dequeue_data(q))) {
		/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
		 * stream identifier.
		 */
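The doubled parentheses in the while condition are deliberate: an assignment used as a truth value is what gcc warns about ("suggest parentheses around assignment used as truth value"), and the extra pair documents that the `=` is intended rather than a mistyped `==`. A small self-contained illustration with a toy dequeue, not the SCTP outqueue:

#include <stdio.h>

static int queue[] = { 3, 2, 1, 0 };	/* 0 terminates, like a NULL chunk */
static int pos;

static int dequeue(void)
{
	return queue[pos++];
}

int main(void)
{
	int chunk;

	/* Same shape as: while ((chunk = sctp_outq_dequeue_data(q))) { ... } */
	while ((chunk = dequeue()))
		printf("processing chunk %d\n", chunk);
	return 0;
}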
@@ -1570,7 +1571,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
	if (transport) {
		if (do_fast_retransmit)
-			sctp_retransmit(q, transport, SCTP_RETRANSMIT_FAST_RTX);
+			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
net/sctp/sm_sideeffect.c
@@ -55,1202 +55,1128 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>

 /* Do forward declarations of static functions.  */
 static void sctp_do_ecn_ce_work(sctp_association_t *, __u32 lowest_tsn);
 static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
					    __u32 lowest_tsn, sctp_chunk_t *);
 static void sctp_do_ecn_cwr_work(sctp_association_t *, __u32 lowest_tsn);
 static void sctp_do_8_2_transport_strike(sctp_association_t *,
					  struct sctp_transport *);
 static void sctp_cmd_init_failed(sctp_cmd_seq_t *, sctp_association_t *);
 static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *, sctp_association_t *,
				   sctp_event_t, sctp_subtype_t,
				   sctp_chunk_t *chunk);
 static int sctp_cmd_process_init(sctp_cmd_seq_t *, sctp_association_t *,
				  sctp_chunk_t *chunk,
				  sctp_init_chunk_t *peer_init, int priority);
 static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *, sctp_association_t *);
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *, sctp_association_t *);
 static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *, sctp_association_t *,
				      struct sctp_transport *);
 static void sctp_cmd_transport_reset(sctp_cmd_seq_t *, sctp_association_t *,
				      struct sctp_transport *);
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *, sctp_association_t *,
				   struct sctp_transport *, sctp_chunk_t *);
 static int sctp_cmd_process_sack(sctp_cmd_seq_t *, sctp_association_t *,
				  sctp_sackhdr_t *);
 static void sctp_cmd_setup_t2(sctp_cmd_seq_t *, sctp_association_t *,
			       sctp_chunk_t *);
 static void sctp_cmd_new_state(sctp_cmd_seq_t *, sctp_association_t *,
				sctp_state_t);
-/* These three macros allow us to pull the debugging code out of the
- * main flow of sctp_do_sm() to keep attention focused on the real
- * functionality there.
- */
-#define DEBUG_PRE \
-	SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
-			  "ep %p, %s, %s, asoc %p[%s], %s\n", \
-			  ep, sctp_evttype_tbl[event_type], \
-			  (*debug_fn)(subtype), asoc, \
-			  sctp_state_tbl[state], state_fn->name)
+/********************************************************************
+ * Helper functions
+ ********************************************************************/

-#define DEBUG_POST \
-	SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
-			  "asoc %p, status: %s\n", \
-			  asoc, sctp_status_tbl[status])
+/* A helper function for delayed processing of INET ECN CE bit. */
+static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
+{
+	/* Save the TSN away for comparison when we receive CWR */

-#define DEBUG_POST_SFX \
-	SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
-			  error, asoc, \
-			  sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
-			  sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
+	asoc->last_ecne_tsn = lowest_tsn;
+	asoc->need_ecne = 1;
+}
-/*
- * This is the master state machine processing function.
- *
- * If you want to understand all of lksctp, this is a
- * good place to start.
- */
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
-	       sctp_state_t state, sctp_endpoint_t *ep,
-	       sctp_association_t *asoc, void *event_arg, int priority)
-{
-	sctp_cmd_seq_t commands;
-	sctp_sm_table_entry_t *state_fn;
-	sctp_disposition_t status;
-	int error = 0;
-	typedef const char *(printfn_t)(sctp_subtype_t);
-
-	static printfn_t *table[] = {
-		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
-	};
-	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
-
-	/* Look up the state function, run it, and then process the
-	 * side effects.  These three steps are the heart of lksctp.
-	 */
-	state_fn = sctp_sm_lookup_event(event_type, state, subtype);
-
-	sctp_init_cmd_seq(&commands);
-
-	DEBUG_PRE;
-	status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
-	DEBUG_POST;
-
-	error = sctp_side_effects(event_type, subtype, state,
-				  ep, asoc, event_arg, status,
-				  &commands, priority);
-	DEBUG_POST_SFX;
-
-	return error;
-}
+/* Helper function for delayed processing of SCTP ECNE chunk.  */
+/* RFC 2960 Appendix A
+ *
+ * RFC 2481 details a specific bit for a sender to send in
+ * the header of its next outbound TCP segment to indicate to
+ * its peer that it has reduced its congestion window.  This
+ * is termed the CWR bit.  For SCTP the same indication is made
+ * by including the CWR chunk.  This chunk contains one data
+ * element, i.e. the TSN number that was sent in the ECNE chunk.
+ * This element represents the lowest TSN number in the datagram
+ * that was originally marked with the CE bit.
+ */
+static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
+					   __u32 lowest_tsn,
+					   sctp_chunk_t *chunk)
+{
+	sctp_chunk_t *repl;
+
+	/* Our previously transmitted packet ran into some congestion
+	 * so we should take action by reducing cwnd and ssthresh
+	 * and then ACK our peer that we we've done so by
+	 * sending a CWR.
+	 */
+
+	/* First, try to determine if we want to actually lower
+	 * our cwnd variables.  Only lower them if the ECNE looks more
+	 * recent than the last response.
+	 */
+	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
+		struct sctp_transport *transport;
+
+		/* Find which transport's congestion variables
+		 * need to be adjusted.
+		 */
+		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
+
+		/* Update the congestion variables. */
+		if (transport)
+			sctp_transport_lower_cwnd(transport,
+						  SCTP_LOWER_CWND_ECNE);
+		asoc->last_cwr_tsn = lowest_tsn;
+	}
+
+	/* Always try to quiet the other end.  In case of lost CWR,
+	 * resend last_cwr_tsn.
+	 */
+	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
+
+	/* If we run out of memory, it will look like a lost CWR.  We'll
+	 * get back in sync eventually.
+	 */
+	return repl;
+}
-#undef DEBUG_PRE
-#undef DEBUG_POST
-
-/*****************************************************************
- * This the master state function side effect processing function.
- *****************************************************************/
-int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
-		      sctp_state_t state, sctp_endpoint_t *ep,
-		      sctp_association_t *asoc, void *event_arg,
-		      sctp_disposition_t status, sctp_cmd_seq_t *commands,
-		      int priority)
-{
-	int error;
-
-	/* FIXME - Most of the dispositions left today would be categorized
-	 * as "exceptional" dispositions.  For those dispositions, it
-	 * may not be proper to run through any of the commands at all.
-	 * For example, the command interpreter might be run only with
-	 * disposition SCTP_DISPOSITION_CONSUME.
-	 */
-	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
-					       ep, asoc, event_arg, status,
-					       commands, priority)))
-		goto bail;
+/* Helper function to do delayed processing of ECN CWR chunk.  */
+static void sctp_do_ecn_cwr_work(sctp_association_t *asoc, __u32 lowest_tsn)
+{
+	/* Turn off ECNE getting auto-prepended to every outgoing
+	 * packet
+	 */
+	asoc->need_ecne = 0;
+}
-	switch (status) {
-	case SCTP_DISPOSITION_DISCARD:
-		SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
-				  "event_type %d, event_id %d\n",
-				  state, event_type, subtype.chunk);
-		break;
-
-	case SCTP_DISPOSITION_NOMEM:
-		/* We ran out of memory, so we need to discard this
-		 * packet.
-		 */
-		/* BUG--we should now recover some memory, probably by
-		 * reneging...
-		 */
-		error = -ENOMEM;
-		break;
-
-	case SCTP_DISPOSITION_DELETE_TCB:
-		/* This should now be a command. */
-		break;
-
-	case SCTP_DISPOSITION_CONSUME:
-	case SCTP_DISPOSITION_ABORT:
-		/*
-		 * We should no longer have much work to do here as the
-		 * real work has been done as explicit commands above.
-		 */
-		break;
-
-	case SCTP_DISPOSITION_VIOLATION:
-		printk(KERN_ERR "sctp protocol violation state %d "
-		       "chunkid %d\n", state, subtype.chunk);
-		break;
-
-	case SCTP_DISPOSITION_NOT_IMPL:
-		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
-		       "event_type %d, event_id %d\n",
-		       state, event_type, subtype.chunk);
-		break;
-
-	case SCTP_DISPOSITION_BUG:
-		printk(KERN_ERR "sctp bug in state %d, "
-		       "event_type %d, event_id %d\n",
-		       state, event_type, subtype.chunk);
-		BUG();
-		break;
-
-	default:
-		printk(KERN_ERR "sctp impossible disposition %d "
-		       "in state %d, event_type %d, event_id %d\n",
-		       status, state, event_type, subtype.chunk);
-		BUG();
-		break;
-	};
-
-bail:
-	return error;
-}
+/* Generate SACK if necessary.  We call this at the end of a packet.  */
+int sctp_gen_sack(struct sctp_association *asoc, int force,
+		  sctp_cmd_seq_t *commands)
+{
+	__u32 ctsn, max_tsn_seen;
+	struct sctp_chunk *sack;
+	int error = 0;
+
+	if (force)
+		asoc->peer.sack_needed = 1;
+
+	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
+	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
+
+	/* From 12.2 Parameters necessary per association (i.e. the TCB):
+	 *
+	 * Ack State : This flag indicates if the next received packet
+	 * 	     : is to be responded to with a SACK. ...
+	 *	     : When DATA chunks are out of order, SACK's
+	 *           : are not delayed (see Section 6).
+	 *
+	 * [This is actually not mentioned in Section 6, but we
+	 * implement it here anyway. --piggy]
+	 */
+	if (max_tsn_seen != ctsn)
+		asoc->peer.sack_needed = 1;
+
+	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
+	 *
+	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
+	 * an acknowledgement SHOULD be generated for at least every
+	 * second packet (not every second DATA chunk) received, and
+	 * SHOULD be generated within 200 ms of the arrival of any
+	 * unacknowledged DATA chunk. ...
+	 */
+	if (!asoc->peer.sack_needed) {
+		/* We will need a SACK for the next packet.  */
+		asoc->peer.sack_needed = 1;
+		goto out;
+	} else {
+		if (asoc->a_rwnd > asoc->rwnd)
+			asoc->a_rwnd = asoc->rwnd;
+		sack = sctp_make_sack(asoc);
+		if (!sack)
+			goto nomem;
+
+		asoc->peer.sack_needed = 0;
+
+		error = sctp_outq_tail(&asoc->outqueue, sack);
+
+		/* Stop the SACK timer.  */
+		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
+	}
+out:
+	return error;
+nomem:
+	error = -ENOMEM;
+	return error;
+}
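sctp_gen_sack() encodes the RFC 2960 delayed-acknowledgement rule: out-of-order data (max_tsn_seen != ctsn) or a forced request sends a SACK now, otherwise only every second packet does. A behavioral model of just that branch structure, as a userspace sketch rather than kernel code:

#include <stdio.h>

static int sack_needed;

static void packet_arrived(int force, int out_of_order)
{
	/* Forced or out-of-order input makes the SACK immediate. */
	if (force || out_of_order)
		sack_needed = 1;

	if (!sack_needed) {
		sack_needed = 1;	/* SACK the *next* packet */
		printf("delay: SACK armed for next packet\n");
	} else {
		sack_needed = 0;	/* send one now */
		printf("send SACK now\n");
	}
}

int main(void)
{
	packet_arrived(0, 0);	/* 1st in-order packet: delayed */
	packet_arrived(0, 0);	/* 2nd packet: SACK goes out */
	packet_arrived(0, 1);	/* out-of-order: SACK immediately */
	return 0;
}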
-/********************************************************************
- * 2nd Level Abstractions
- ********************************************************************/
-
-/* This is the side-effect interpreter.  */
-int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
-			 sctp_state_t state, sctp_endpoint_t *ep,
-			 sctp_association_t *asoc, void *event_arg,
-			 sctp_disposition_t status, sctp_cmd_seq_t *commands,
-			 int priority)
-{
-	int error = 0;
-	int force;
-	sctp_cmd_t *cmd;
-	sctp_chunk_t *new_obj;
-	sctp_chunk_t *chunk = NULL;
-	struct sctp_packet *packet;
-	struct list_head *pos;
-	struct timer_list *timer;
-	unsigned long timeout;
-	struct sctp_transport *t;
-	sctp_sackhdr_t sackh;
-
-	if (SCTP_EVENT_T_TIMEOUT != event_type)
-		chunk = (sctp_chunk_t *) event_arg;
-
-	/* Note:  This whole file is a huge candidate for rework.
-	 * For example, each command could either have its own handler, so
-	 * the loop would look like:
-	 *     while (cmds)
-	 *         cmd->handle(x, y, z)
-	 *     --jgrimm
-	 */
-	while (NULL != (cmd = sctp_next_cmd(commands))) {
-		switch (cmd->verb) {
-		case SCTP_CMD_NOP:
-			/* Do nothing. */
-			break;
-
-		case SCTP_CMD_NEW_ASOC:
-			/* Register a new association.  */
-			asoc = cmd->obj.ptr;
-			/* Register with the endpoint.  */
-			sctp_endpoint_add_asoc(ep, asoc);
-			sctp_hash_established(asoc);
-			break;
-
-		case SCTP_CMD_UPDATE_ASSOC:
-			sctp_assoc_update(asoc, cmd->obj.ptr);
-			break;
-
-		case SCTP_CMD_PURGE_OUTQUEUE:
-			sctp_outq_teardown(&asoc->outqueue);
-			break;
-
-		case SCTP_CMD_DELETE_TCB:
-			/* Delete the current association.  */
-			sctp_unhash_established(asoc);
-			sctp_association_free(asoc);
-			asoc = NULL;
-			break;
-
-		case SCTP_CMD_NEW_STATE:
-			/* Enter a new state.  */
-			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
-			break;
-
-		case SCTP_CMD_REPORT_TSN:
-			/* Record the arrival of a TSN.  */
-			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
-			break;
+/* When the T3-RTX timer expires, it calls this function to create the
+ * relevant state machine event.
+ */
+void sctp_generate_t3_rtx_event(unsigned long peer)
+{
+	int error;
+	struct sctp_transport *transport = (struct sctp_transport *) peer;
+	sctp_association_t *asoc = transport->asoc;
+
+	/* Check whether a task is in the sock.  */
+	sctp_bh_lock_sock(asoc->base.sk);
+	if (sock_owned_by_user(asoc->base.sk)) {
+		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);
+
+		/* Try again later.  */
+		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
+			sctp_transport_hold(transport);
+		goto out_unlock;
+	}
+
+	/* Is this transport really dead and just waiting around for
+	 * the timer to let go of the reference?
+	 */
+	if (transport->dead)
+		goto out_unlock;
+
+	/* Run through the state machine.  */
+	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
+			   asoc->state, asoc->ep, asoc,
+			   transport, GFP_ATOMIC);
+
+	if (error)
+		asoc->base.sk->err = -error;
+
+out_unlock:
+	sctp_bh_unlock_sock(asoc->base.sk);
+	sctp_transport_put(transport);
+}
-		case SCTP_CMD_GEN_SACK:
-			/* Generate a Selective ACK.
-			 * The argument tells us whether to just count
-			 * the packet and MAYBE generate a SACK, or
-			 * force a SACK out.
-			 */
-			force = cmd->obj.i32;
-			error = sctp_gen_sack(asoc, force, commands);
-			break;
-
-		case SCTP_CMD_PROCESS_SACK:
-			/* Process an inbound SACK.  */
-			error = sctp_cmd_process_sack(commands, asoc,
-						      cmd->obj.ptr);
-			break;
-
-		case SCTP_CMD_GEN_INIT_ACK:
-			/* Generate an INIT ACK chunk.  */
-			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
-						     0);
-			if (!new_obj)
-				goto nomem;
-
-			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
-					SCTP_CHUNK(new_obj));
-			break;
-
-		case SCTP_CMD_PEER_INIT:
-			/* Process a unified INIT from the peer.
-			 * Note: Only used during INIT-ACK processing.  If
-			 * there is an error just return to the outter
-			 * layer which will bail.
-			 */
-			error = sctp_cmd_process_init(commands, asoc,
-						      chunk, cmd->obj.ptr,
-						      priority);
-			break;
-
-		case SCTP_CMD_GEN_COOKIE_ECHO:
-			/* Generate a COOKIE ECHO chunk.  */
-			new_obj = sctp_make_cookie_echo(asoc, chunk);
-			if (!new_obj) {
-				if (cmd->obj.ptr)
-					sctp_free_chunk(cmd->obj.ptr);
-				goto nomem;
-			}
-			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
-					SCTP_CHUNK(new_obj));
-
-			/* If there is an ERROR chunk to be sent along with
-			 * the COOKIE_ECHO, send it, too.
-			 */
-			if (cmd->obj.ptr)
-				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
-						SCTP_CHUNK(cmd->obj.ptr));
-			break;
-
-		case SCTP_CMD_GEN_SHUTDOWN:
-			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
-			 * Reset error counts.
-			 */
-			asoc->overall_error_count = 0;
+/* This is a sa interface for producing timeout events.  It works
+ * for timeouts which use the association as their parameter.
+ */
+static void sctp_generate_timeout_event(sctp_association_t *asoc,
+					sctp_event_timeout_t timeout_type)
+{
+	int error = 0;
+
+	sctp_bh_lock_sock(asoc->base.sk);
+	if (sock_owned_by_user(asoc->base.sk)) {
+		SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
+				  __FUNCTION__, timeout_type);
+
+		/* Try again later.  */
+		if (!mod_timer(&asoc->timers[timeout_type],
+			       jiffies + (HZ/20)))
+			sctp_association_hold(asoc);
+		goto out_unlock;
+	}
+
+	/* Is this association really dead and just waiting around for
+	 * the timer to let go of the reference?
+	 */
+	if (asoc->base.dead)
+		goto out_unlock;
+
+	/* Run through the state machine.  */
+	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+			   SCTP_ST_TIMEOUT(timeout_type),
+			   asoc->state, asoc->ep, asoc,
+			   (void *)timeout_type, GFP_ATOMIC);
+
+	if (error)
+		asoc->base.sk->err = -error;
+
+out_unlock:
+	sctp_bh_unlock_sock(asoc->base.sk);
+	sctp_association_put(asoc);
+}
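All the timer handlers here share one pattern: if user context holds the socket lock, the handler cannot run the state machine from softirq context, so it re-arms the timer HZ/20 jiffies out (50 ms at HZ=100) and takes a reference when mod_timer() reports the timer was inactive. A userspace model of that retry shape, with the hold/put refcounting elided:

#include <stdio.h>

static int sock_owned_by_user_flag = 1;
static unsigned long timer_expiry;

static void rearm(unsigned long jiffies, unsigned long delta)
{
	timer_expiry = jiffies + delta;
	printf("busy: timer re-armed for jiffy %lu\n", timer_expiry);
}

static void timeout_event(unsigned long jiffies)
{
	const unsigned long hz = 100;	/* assume HZ=100 for this sketch */

	if (sock_owned_by_user_flag) {
		rearm(jiffies, hz / 20);	/* retry in 50 ms */
		return;
	}
	printf("running state machine at jiffy %lu\n", jiffies);
}

int main(void)
{
	timeout_event(1000);		/* socket busy: deferred */
	sock_owned_by_user_flag = 0;
	timeout_event(1005);		/* lock free: proceed */
	return 0;
}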
-			/* Generate a SHUTDOWN chunk.  */
-			new_obj = sctp_make_shutdown(asoc);
-			if (!new_obj)
-				goto nomem;
-			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
-					SCTP_CHUNK(new_obj));
-			break;
-
-		case SCTP_CMD_CHUNK_ULP:
-			/* Send a chunk to the sockets layer.  */
-			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
-					  "chunk_up:", cmd->obj.ptr,
-					  "ulpq:", &asoc->ulpq);
-			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
-					    GFP_ATOMIC);
-			break;
-
-		case SCTP_CMD_EVENT_ULP:
-			/* Send a notification to the sockets layer.  */
-			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
-					  "event_up:", cmd->obj.ptr,
-					  "ulpq:", &asoc->ulpq);
-			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
-			break;
-
-		case SCTP_CMD_REPLY:
-			/* Send a chunk to our peer.  */
-			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
-			break;
-
-		case SCTP_CMD_SEND_PKT:
-			/* Send a full packet to our peer.  */
-			packet = cmd->obj.ptr;
-			sctp_packet_transmit(packet);
-			sctp_ootb_pkt_free(packet);
-			break;
-
-		case SCTP_CMD_RETRAN:
-			/* Mark a transport for retransmission.  */
-			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
-					SCTP_RETRANSMIT_T3_RTX);
-			break;
-
-		case SCTP_CMD_TRANSMIT:
-			/* Kick start transmission. */
-			error = sctp_outq_flush(&asoc->outqueue, 0);
-			break;
-
-		case SCTP_CMD_ECN_CE:
-			/* Do delayed CE processing.   */
-			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
-			break;
-
-		case SCTP_CMD_ECN_ECNE:
-			/* Do delayed ECNE processing. */
-			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
-							chunk);
-			if (new_obj)
-				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
-						SCTP_CHUNK(new_obj));
-			break;
-
-		case SCTP_CMD_ECN_CWR:
-			/* Do delayed CWR processing.  */
-			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
-			break;
-
-		case SCTP_CMD_SETUP_T2:
-			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
-			break;
-
-		case SCTP_CMD_TIMER_START:
-			timer = &asoc->timers[cmd->obj.to];
-			timeout = asoc->timeouts[cmd->obj.to];
-			if (!timeout)
-				BUG();
-
-			timer->expires = jiffies + timeout;
-			sctp_association_hold(asoc);
-			add_timer(timer);
-			break;
-
-		case SCTP_CMD_TIMER_RESTART:
-			timer = &asoc->timers[cmd->obj.to];
-			timeout = asoc->timeouts[cmd->obj.to];
-			if (!mod_timer(timer, jiffies + timeout))
-				sctp_association_hold(asoc);
-			break;
-
-		case SCTP_CMD_TIMER_STOP:
-			timer = &asoc->timers[cmd->obj.to];
-			if (timer_pending(timer) && del_timer(timer))
-				sctp_association_put(asoc);
-			break;
+void sctp_generate_t1_cookie_event(unsigned long data)
+{
+	sctp_association_t *asoc = (sctp_association_t *) data;
+	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
+}
+
+void sctp_generate_t1_init_event(unsigned long data)
+{
+	sctp_association_t *asoc = (sctp_association_t *) data;
+	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
+}
+
+void sctp_generate_t2_shutdown_event(unsigned long data)
+{
+	sctp_association_t *asoc = (sctp_association_t *) data;
+	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
+}
+
+void sctp_generate_t5_shutdown_guard_event(unsigned long data)
+{
+	sctp_association_t *asoc = (sctp_association_t *) data;
+	sctp_generate_timeout_event(asoc,
+				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
+} /* sctp_generate_t5_shutdown_guard_event() */
+
+void sctp_generate_autoclose_event(unsigned long data)
+{
+	sctp_association_t *asoc = (sctp_association_t *) data;
+	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
+}
+
+/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
+ * sure that the transport is still valid.
+ */
+void sctp_generate_heartbeat_event(unsigned long data)
+{
+	int error = 0;
+	struct sctp_transport *transport = (struct sctp_transport *) data;
+	sctp_association_t *asoc = transport->asoc;
+
+	sctp_bh_lock_sock(asoc->base.sk);
+	if (sock_owned_by_user(asoc->base.sk)) {
+		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);
+
+		/* Try again later.  */
+		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
+			sctp_transport_hold(transport);
+		goto out_unlock;
+	}
+
+	/* Is this structure just waiting around for us to actually
+	 * get destroyed?
+	 */
+	if (transport->dead)
+		goto out_unlock;
+
+	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
+			   asoc->state, asoc->ep, asoc,
+			   transport, GFP_ATOMIC);
+
+	if (error)
+		asoc->base.sk->err = -error;
+
+out_unlock:
+	sctp_bh_unlock_sock(asoc->base.sk);
+	sctp_transport_put(transport);
+}
+
+/* Inject a SACK Timeout event into the state machine.  */
+void sctp_generate_sack_event(unsigned long data)
+{
+	sctp_association_t *asoc = (sctp_association_t *) data;
+	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
+}
+
+sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
+	NULL,
+	sctp_generate_t1_cookie_event,
+	sctp_generate_t1_init_event,
+	sctp_generate_t2_shutdown_event,
+	NULL,
+	sctp_generate_t5_shutdown_guard_event,
+	sctp_generate_heartbeat_event,
+	sctp_generate_sack_event,
+	sctp_generate_autoclose_event,
+};
-		case SCTP_CMD_INIT_RESTART:
-			/* Do the needed accounting and updates
-			 * associated with restarting an initialization
-			 * timer.
-			 */
-			asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
-			asoc->timeouts[cmd->obj.to] *= 2;
-			if (asoc->timeouts[cmd->obj.to] >
-			    asoc->max_init_timeo) {
-				asoc->timeouts[cmd->obj.to] =
-					asoc->max_init_timeo;
-			}
-
-			/* If we've sent any data bundled with
-			 * COOKIE-ECHO we need to resend.
-			 */
-			list_for_each(pos, &asoc->peer.transport_addr_list) {
-				t = list_entry(pos, struct sctp_transport,
-					       transports);
-				sctp_retransmit_mark(&asoc->outqueue, t, 0);
-			}
-
-			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
-					SCTP_TO(cmd->obj.to));
-			break;
+/* RFC 2960 8.2 Path Failure Detection
+ *
+ * When its peer endpoint is multi-homed, an endpoint should keep a
+ * error counter for each of the destination transport addresses of the
+ * peer endpoint.
+ *
+ * Each time the T3-rtx timer expires on any address, or when a
+ * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
+ * the error counter of that destination address will be incremented.
+ * When the value in the error counter exceeds the protocol parameter
+ * 'Path.Max.Retrans' of that destination address, the endpoint should
+ * mark the destination transport address as inactive, and a
+ * notification SHOULD be sent to the upper layer.
+ *
+ */
+static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
+					 struct sctp_transport *transport)
+{
+	/* The check for association's overall error counter exceeding the
+	 * threshold is done in the state function.
+	 */
+	asoc->overall_error_count++;
+
+	if (transport->active &&
+	    (transport->error_count++ >= transport->error_threshold)) {
+		SCTP_DEBUG_PRINTK("transport_strike: transport "
+				  "IP:%d.%d.%d.%d failed.\n",
+				  NIPQUAD(transport->ipaddr.v4.sin_addr));
+		sctp_assoc_control_transport(asoc, transport,
+					     SCTP_TRANSPORT_DOWN,
+					     SCTP_FAILED_THRESHOLD);
+	}
+
+	/* E2) For the destination address for which the timer
+	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
+	 * maximum value discussed in rule C7 above (RTO.max) may be
+	 * used to provide an upper bound to this doubling operation.
+	 */
+	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
+}
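Rule E2 is classic exponential backoff: each strike doubles the RTO up to RTO.Max. A userspace model using RFC 2960's suggested defaults (RTO.Initial 3 s, RTO.Max 60 s); the strike count is illustrative:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long rto = 3000, rto_max = 60000;	/* milliseconds */
	int strike;

	for (strike = 1; strike <= 6; strike++) {
		rto = min_ul(rto * 2, rto_max);	/* E2: back off the timer */
		printf("strike %d: rto = %lu ms\n", strike, rto);
	}
	/* prints 6000, 12000, 24000, 48000, 60000, 60000 */
	return 0;
}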
-		case SCTP_CMD_INIT_FAILED:
-			sctp_cmd_init_failed(commands, asoc);
-			break;
-
-		case SCTP_CMD_ASSOC_FAILED:
-			sctp_cmd_assoc_failed(commands, asoc, event_type,
-					      subtype, chunk);
-			break;
-
-		case SCTP_CMD_COUNTER_INC:
-			asoc->counters[cmd->obj.counter]++;
-			break;
-
-		case SCTP_CMD_COUNTER_RESET:
-			asoc->counters[cmd->obj.counter] = 0;
-			break;
-
-		case SCTP_CMD_REPORT_DUP:
-			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
-					     cmd->obj.u32);
-			break;
-
-		case SCTP_CMD_REPORT_BAD_TAG:
-			SCTP_DEBUG_PRINTK("vtag mismatch!\n");
-			break;
-
-		case SCTP_CMD_STRIKE:
-			/* Mark one strike against a transport.  */
-			sctp_do_8_2_transport_strike(asoc, cmd->obj.transport);
-			break;
-
-		case SCTP_CMD_TRANSPORT_RESET:
-			t = cmd->obj.transport;
-			sctp_cmd_transport_reset(commands, asoc, t);
-			break;
-
-		case SCTP_CMD_TRANSPORT_ON:
-			t = cmd->obj.transport;
-			sctp_cmd_transport_on(commands, asoc, t, chunk);
-			break;
-
-		case SCTP_CMD_HB_TIMERS_START:
-			sctp_cmd_hb_timers_start(commands, asoc);
-			break;
-
-		case SCTP_CMD_HB_TIMER_UPDATE:
-			t = cmd->obj.transport;
-			sctp_cmd_hb_timer_update(commands, asoc, t);
-			break;
+/* Worker routine to handle INIT command failure.  */
+static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
+				 sctp_association_t *asoc)
+{
+	struct sctp_ulpevent *event;
+
+	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
+						0, 0, 0, GFP_ATOMIC);
+
+	if (event)
+		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+				SCTP_ULPEVENT(event));
+
+	/* FIXME: We need to handle data possibly either
+	 * sent via COOKIE-ECHO bundling or just waiting in
+	 * the transmit queue, if the user has enabled
+	 * SEND_FAILED notifications.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+}
+
+/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
+static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
+				  sctp_association_t *asoc,
+				  sctp_event_t event_type,
+				  sctp_subtype_t subtype,
+				  sctp_chunk_t *chunk)
+{
+	struct sctp_ulpevent *event;
+	__u16 error = 0;
+
+	switch (event_type) {
+	case SCTP_EVENT_T_PRIMITIVE:
+		if (SCTP_PRIMITIVE_ABORT == subtype.primitive)
+			error = SCTP_ERROR_USER_ABORT;
+		break;
+	case SCTP_EVENT_T_CHUNK:
+		if (chunk && (SCTP_CID_ABORT == chunk->chunk_hdr->type) &&
+		    (ntohs(chunk->chunk_hdr->length) >=
+		     (sizeof(struct sctp_chunkhdr) +
+		      sizeof(struct sctp_errhdr)))) {
+			error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Cancel any partial delivery in progress.  */
+	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+
+	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
+						error, 0, 0, GFP_ATOMIC);
+	if (event)
+		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+				SCTP_ULPEVENT(event));
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+			SCTP_STATE(SCTP_STATE_CLOSED));
+
+	/* FIXME: We need to handle data that could not be sent or was not
+	 * acked, if the user has enabled SEND_FAILED notifications.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+}
-		case SCTP_CMD_HB_TIMERS_STOP:
-			sctp_cmd_hb_timers_stop(commands, asoc);
-			break;
-
-		case SCTP_CMD_REPORT_ERROR:
-			error = cmd->obj.error;
-			break;
-
-		case SCTP_CMD_PROCESS_CTSN:
-			/* Dummy up a SACK for processing. */
-			sackh.cum_tsn_ack = cmd->obj.u32;
-			sackh.a_rwnd = 0;
-			sackh.num_gap_ack_blocks = 0;
-			sackh.num_dup_tsns = 0;
-			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
-					SCTP_SACKH(&sackh));
-			break;
-
-		case SCTP_CMD_DISCARD_PACKET:
-			/* We need to discard the whole packet.  */
-			chunk->pdiscard = 1;
-			break;
-
-		case SCTP_CMD_RTO_PENDING:
-			t = cmd->obj.transport;
-			t->rto_pending = 1;
-			break;
-
-		case SCTP_CMD_PART_DELIVER:
-			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
-						   GFP_ATOMIC);
-			break;
-
-		case SCTP_CMD_RENEGE:
-			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
-					 GFP_ATOMIC);
-			break;
-
-		default:
-			printk(KERN_WARNING "Impossible command: %u, %p\n",
-			       cmd->verb, cmd->obj.ptr);
-			break;
-		};
-		if (error)
-			return error;
-	}
-
-	return error;
-nomem:
-	error = -ENOMEM;
-	return error;
-}
+/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
+ * inside the cookie.  In reality, this is only used for INIT-ACK processing
+ * since all other cases use "temporary" associations and can do all
+ * their work in statefuns directly.
+ */
+static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
+				 sctp_association_t *asoc,
+				 sctp_chunk_t *chunk,
+				 sctp_init_chunk_t *peer_init, int priority)
+{
+	int error;
+
+	/* We only process the init as a sideeffect in a single
+	 * case.   This is when we process the INIT-ACK.   If we
+	 * fail during INIT processing (due to malloc problems),
+	 * just return the error and stop processing the stack.
+	 */
+	if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
+			       sctp_source(chunk), peer_init, priority))
+		error = -ENOMEM;
+	else
+		error = 0;
+
+	return error;
+}
+
+/* Helper function to break out starting up of heartbeat timers.  */
+static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
+				     sctp_association_t *asoc)
+{
+	struct sctp_transport *t;
+	struct list_head *pos;
+
+	/* Start a heartbeat timer for each transport on the association.
+	 * hold a reference on the transport to make sure none of
+	 * the needed data structures go away.
+	 */
+	list_for_each(pos, &asoc->peer.transport_addr_list) {
+		t = list_entry(pos, struct sctp_transport, transports);
+
+		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
+			sctp_transport_hold(t);
+	}
+}
+
+static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
+				    sctp_association_t *asoc)
+{
+	struct sctp_transport *t;
+	struct list_head *pos;
+
+	/* Stop all heartbeat timers. */
+	list_for_each(pos, &asoc->peer.transport_addr_list) {
+		t = list_entry(pos, struct sctp_transport, transports);
+		if (del_timer(&t->hb_timer))
+			sctp_transport_put(t);
+	}
+}
-/* A helper function for delayed processing of INET ECN CE bit. */
-static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
-{
-	/* Save the TSN away for comparison when we receive CWR */
-	asoc->last_ecne_tsn = lowest_tsn;
-	asoc->need_ecne = 1;
-}
-
-/* Helper function for delayed processing of SCTP ECNE chunk.  */
-/* RFC 2960 Appendix A
- *
- * RFC 2481 details a specific bit for a sender to send in
- * the header of its next outbound TCP segment to indicate to
- * its peer that it has reduced its congestion window.  This
- * is termed the CWR bit.  For SCTP the same indication is made
- * by including the CWR chunk.  This chunk contains one data
- * element, i.e. the TSN number that was sent in the ECNE chunk.
- * This element represents the lowest TSN number in the datagram
- * that was originally marked with the CE bit.
- */
-static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
-					   __u32 lowest_tsn,
-					   sctp_chunk_t *chunk)
-{
-	sctp_chunk_t *repl;
-
-	/* Our previously transmitted packet ran into some congestion
-	 * so we should take action by reducing cwnd and ssthresh
-	 * and then ACK our peer that we we've done so by
-	 * sending a CWR.
-	 */
-
-	/* First, try to determine if we want to actually lower
-	 * our cwnd variables.  Only lower them if the ECNE looks more
-	 * recent than the last response.
-	 */
-	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
-		struct sctp_transport *transport;
-
-		/* Find which transport's congestion variables
-		 * need to be adjusted.
-		 */
-		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
-
-		/* Update the congestion variables. */
-		if (transport)
-			sctp_transport_lower_cwnd(transport,
-						  SCTP_LOWER_CWND_ECNE);
-		asoc->last_cwr_tsn = lowest_tsn;
-	}
-
-	/* Always try to quiet the other end.  In case of lost CWR,
-	 * resend last_cwr_tsn.
-	 */
-	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
-
-	/* If we run out of memory, it will look like a lost CWR.  We'll
-	 * get back in sync eventually.
-	 */
-	return repl;
-}
+/* Helper function to update the heartbeat timer. */
+static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
+				     sctp_association_t *asoc,
+				     struct sctp_transport *t)
+{
+	/* Update the heartbeat timer.  */
+	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
+		sctp_transport_hold(t);
+}
+
+/* Helper function to handle the reception of an HEARTBEAT ACK.  */
+static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
+				  sctp_association_t *asoc,
+				  struct sctp_transport *t,
+				  sctp_chunk_t *chunk)
+{
+	sctp_sender_hb_info_t *hbinfo;
+
+	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
+	 * HEARTBEAT should clear the error counter of the destination
+	 * transport address to which the HEARTBEAT was sent.
+	 * The association's overall error count is also cleared.
+	 */
+	t->error_count = 0;
+	t->asoc->overall_error_count = 0;
+
+	/* Mark the destination transport address as active if it is not so
+	 * marked.
+	 */
+	if (!t->active)
+		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
+					     SCTP_HEARTBEAT_SUCCESS);
+
+	/* The receiver of the HEARTBEAT ACK should also perform an
+	 * RTT measurement for that destination transport address
+	 * using the time value carried in the HEARTBEAT ACK chunk.
+	 */
+	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
+}
+
+/* Helper function to do a transport reset at the expiry of the hearbeat
+ * timer.
+ */
+static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
+				     sctp_association_t *asoc,
+				     struct sctp_transport *t)
+{
+	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
+
+	/* Mark one strike against a transport.  */
+	sctp_do_8_2_transport_strike(asoc, t);
+}
-/* Helper function to do delayed processing of ECN CWR chunk.  */
-static void sctp_do_ecn_cwr_work(sctp_association_t *asoc, __u32 lowest_tsn)
-{
-	/* Turn off ECNE getting auto-prepended to every outgoing
-	 * packet
-	 */
-	asoc->need_ecne = 0;
-}
-
-/* This macro is to compress the text a bit...  */
-#define AP(v) asoc->peer.v
-
-/* Generate SACK if necessary.  We call this at the end of a packet.  */
-int sctp_gen_sack(sctp_association_t *asoc, int force, sctp_cmd_seq_t *commands)
-{
-	__u32 ctsn, max_tsn_seen;
-	sctp_chunk_t *sack;
-	int error = 0;
-
-	if (force)
-		asoc->peer.sack_needed = 1;
-
-	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
-	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
-
-	/* From 12.2 Parameters necessary per association (i.e. the TCB):
-	 *
-	 * Ack State : This flag indicates if the next received packet
-	 * 	     : is to be responded to with a SACK. ...
-	 *	     : When DATA chunks are out of order, SACK's
-	 *           : are not delayed (see Section 6).
-	 *
-	 * [This is actually not mentioned in Section 6, but we
-	 * implement it here anyway. --piggy]
-	 */
-	if (max_tsn_seen != ctsn)
-		asoc->peer.sack_needed = 1;
-
-	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
-	 *
-	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
-	 * an acknowledgement SHOULD be generated for at least every
-	 * second packet (not every second DATA chunk) received, and
-	 * SHOULD be generated within 200 ms of the arrival of any
-	 * unacknowledged DATA chunk. ...
-	 */
-	if (!asoc->peer.sack_needed) {
-		/* We will need a SACK for the next packet.  */
-		asoc->peer.sack_needed = 1;
-		goto out;
-	} else {
-		if (asoc->a_rwnd > asoc->rwnd)
-			asoc->a_rwnd = asoc->rwnd;
-		sack = sctp_make_sack(asoc);
-		if (!sack)
-			goto nomem;
-
-		asoc->peer.sack_needed = 0;
-
-		error = sctp_outq_tail(&asoc->outqueue, sack);
-
-		/* Stop the SACK timer.  */
-		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
-				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
-	}
+/* Helper function to process the process SACK command.  */
+static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
+				 sctp_association_t *asoc,
+				 sctp_sackhdr_t *sackh)
+{
+	int err;
+
+	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
+		/* There are no more TSNs awaiting SACK.  */
+		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
+				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
+				 asoc->state, asoc->ep, asoc, NULL,
+				 GFP_ATOMIC);
+	} else {
+		/* Windows may have opened, so we need
+		 * to check if we have DATA to transmit
+		 */
+		err = sctp_outq_flush(&asoc->outqueue, 0);
+	}
+
+	return err;
+}
+
+/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
+ * the transport for a shutdown chunk.
+ */
+static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
+			      sctp_chunk_t *chunk)
+{
+	struct sctp_transport *t;
+
+	t = sctp_assoc_choose_shutdown_transport(asoc);
+	asoc->shutdown_last_sent_to = t;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
+	chunk->transport = t;
+}
+
+/* Helper function to change the state of an association. */
+static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
+			       sctp_state_t state)
+{
+	struct sock *sk = asoc->base.sk;
+	struct sctp_opt *sp = sctp_sk(sk);
+
+	asoc->state = state;
+	asoc->state_timestamp = jiffies;
+
+	if ((SCTP_STATE_ESTABLISHED == asoc->state) ||
+	    (SCTP_STATE_CLOSED == asoc->state)) {
+		/* Wake up any processes waiting in the asoc's wait queue in
+		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
+		 */
+		if (waitqueue_active(&asoc->wait))
+			wake_up_interruptible(&asoc->wait);
+
+		/* Wake up any processes waiting in the sk's sleep queue of
+		 * a TCP-style or UDP-style peeled-off socket in
+		 * sctp_wait_for_accept() or sctp_wait_for_packet().
+		 * For a UDP-style socket, the waiters are woken up by the
+		 * notifications.
+		 */
+		if (SCTP_SOCKET_UDP != sp->type)
+			sk->state_change(sk);
+	}
+
+	/* Change the sk->state of a TCP-style socket that has sucessfully
+	 * completed a connect() call.
+	 */
+	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
+	    (SCTP_SOCKET_TCP == sp->type) && (SCTP_SS_CLOSED == sk->state))
+		sk->state = SCTP_SS_ESTABLISHED;
+}
-out:
-	return error;
-nomem:
-	error = -ENOMEM;
-	return error;
-}
-
-/* Handle a duplicate TSN.  */
-void sctp_do_TSNdup(sctp_association_t *asoc, sctp_chunk_t *chunk, long gap)
-{
-#if 0
-	sctp_chunk_t *sack;
-
-	/* Caution:  gap < 2 * SCTP_TSN_MAP_SIZE
-	 * so gap can be negative.
-	 *
-	 * --xguo
-	 */
-
-	/* Count this TSN.  */
-	if (gap < SCTP_TSN_MAP_SIZE) {
-		asoc->peer.tsn_map[gap]++;
-	} else {
-		asoc->peer.tsn_map_overflow[gap - SCTP_TSN_MAP_SIZE]++;
-	}
-
-	/* From 6.2 Acknowledgement on Reception of DATA Chunks
-	 *
-	 * When a packet arrives with duplicate DATA chunk(s)
-	 * and with no new DATA chunk(s), the endpoint MUST
-	 * immediately send a SACK with no delay.  If a packet
-	 * arrives with duplicate DATA chunk(s) bundled with
-	 * new DATA chunks, the endpoint MAY immediately send a
-	 * SACK.  Normally receipt of duplicate DATA chunks
-	 * will occur when the original SACK chunk was lost and
-	 * the peer's RTO has expired.  The duplicate TSN
-	 * number(s) SHOULD be reported in the SACK as
-	 * duplicate.
-	 */
-	asoc->counters[SctpCounterAckState] = 2;
-#endif /* 0 */
-} /* sctp_do_TSNdup() */
-
-#undef AP
+/* These three macros allow us to pull the debugging code out of the
+ * main flow of sctp_do_sm() to keep attention focused on the real
+ * functionality there.
+ */
+#define DEBUG_PRE \
+	SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
+			  "ep %p, %s, %s, asoc %p[%s], %s\n", \
+			  ep, sctp_evttype_tbl[event_type], \
+			  (*debug_fn)(subtype), asoc, \
+			  sctp_state_tbl[state], state_fn->name)
+
+#define DEBUG_POST \
+	SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
+			  "asoc %p, status: %s\n", \
+			  asoc, sctp_status_tbl[status])
+
+#define DEBUG_POST_SFX \
+	SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
+			  error, asoc, \
+			  sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
+			  sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
+
+/*
+ * This is the master state machine processing function.
+ *
+ * If you want to understand all of lksctp, this is a
+ * good place to start.
+ */
+int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+	       sctp_state_t state, sctp_endpoint_t *ep,
+	       sctp_association_t *asoc, void *event_arg, int priority)
+{
+	sctp_cmd_seq_t commands;
+	sctp_sm_table_entry_t *state_fn;
+	sctp_disposition_t status;
+	int error = 0;
+	typedef const char *(printfn_t)(sctp_subtype_t);
+
+	static printfn_t *table[] = {
+		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
+	};
+	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
+
+	/* Look up the state function, run it, and then process the
+	 * side effects.  These three steps are the heart of lksctp.
+	 */
+	state_fn = sctp_sm_lookup_event(event_type, state, subtype);
+
+	sctp_init_cmd_seq(&commands);
+
+	DEBUG_PRE;
+	status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
+	DEBUG_POST;
+
+	error = sctp_side_effects(event_type, subtype, state,
+				  ep, asoc, event_arg, status,
+				  &commands, priority);
+	DEBUG_POST_SFX;
+
+	return error;
+}
+
+#undef DEBUG_PRE
+#undef DEBUG_POST
/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
		      sctp_state_t state, sctp_endpoint_t *ep,
		      sctp_association_t *asoc, void *event_arg,
		      sctp_disposition_t status, sctp_cmd_seq_t *commands,
		      int priority)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, asoc, event_arg, status,
					       commands, priority)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
				  "event_type %d, event_id %d\n",
				  state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
		/* This should now be a command. */
		break;

	case SCTP_DISPOSITION_CONSUME:
	case SCTP_DISPOSITION_ABORT:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		printk(KERN_ERR "sctp protocol violation state %d "
		       "chunkid %d\n", state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		printk(KERN_ERR "sctp bug in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		printk(KERN_ERR "sctp impossible disposition %d "
		       "in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	};

bail:
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	sctp_association_t *asoc = transport->asoc;

	/* Check whether a task is in the sock. */
	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later. */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this transport really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (transport->dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}

/* This is a sa interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(sctp_association_t *asoc,
					sctp_event_timeout_t timeout_type)
{
	int error = 0;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
				  __FUNCTION__, timeout_type);

		/* Try again later. */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_association_put(asoc);
}

void sctp_generate_t1_cookie_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

void sctp_generate_t1_init_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

void sctp_generate_t2_shutdown_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}
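The `if (!mod_timer(...)) sctp_*_hold(...)` idiom that every one of these callbacks uses keeps the reference count balanced: mod_timer() returns 0 when the timer was not already pending, meaning the re-arm created a brand-new pending timer that needs its own reference, while the reference the currently running callback holds is dropped at `out_unlock`. A minimal sketch of just that idiom, with a hypothetical refcounted object (none of the `obj_*` names exist in the stack):

/* Sketch only: the hold/put balance used by the timer callbacks above. */
#include <linux/timer.h>

struct obj {
	struct timer_list timer;
	/* ... refcount, state ... */
};

static int obj_is_busy(struct obj *obj);	/* hypothetical */
static void obj_hold(struct obj *obj);		/* hypothetical: ++refcnt */
static void obj_put(struct obj *obj);		/* hypothetical: --refcnt */

static void obj_timer_callback(unsigned long data)
{
	struct obj *obj = (struct obj *) data;

	if (obj_is_busy(obj)) {
		/* mod_timer() returns 0 when the timer was not already
		 * pending; the newly armed timer then needs its own
		 * reference, distinct from the one this callback holds.
		 */
		if (!mod_timer(&obj->timer, jiffies + (HZ / 20)))
			obj_hold(obj);
		goto out;
	}

	/* ... do the real work here ... */
out:
	obj_put(obj);	/* drop the reference this callback ran under */
}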
void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
} /* sctp_generate_t5_shutdown_guard_event() */

void sctp_generate_autoclose_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	sctp_association_t *asoc = transport->asoc;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later. */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (transport->dead)
		goto out_unlock;

	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine. */
void sctp_generate_sack_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t5_shutdown_guard_event,
	sctp_generate_heartbeat_event,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};
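Each slot of `sctp_timer_events[]` lines up with one `sctp_event_timeout_t` value, so timer setup can be table-driven. A minimal sketch of how an association's timers could be wired to these callbacks; the surrounding helper is hypothetical (the real initialization lives elsewhere in the stack), while the table and the `timers[]`/`timeouts[]` arrays match the declarations used in this file:

/* Sketch only: table-driven timer wiring.  NULL slots (e.g. T3-RTX,
 * which is per-transport and armed with a transport pointer instead)
 * are simply never started through this array.
 */
static void sketch_init_asoc_timers(sctp_association_t *asoc)
{
	int i;

	for (i = 0; i < SCTP_NUM_TIMEOUT_TYPES; i++) {
		init_timer(&asoc->timers[i]);
		asoc->timers[i].function = sctp_timer_events[i];
		asoc->timers[i].data = (unsigned long) asoc;
	}
}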
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			 sctp_state_t state, sctp_endpoint_t *ep,
			 sctp_association_t *asoc, void *event_arg,
			 sctp_disposition_t status, sctp_cmd_seq_t *commands,
			 int priority)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	sctp_chunk_t *new_obj;
	sctp_chunk_t *chunk = NULL;
	struct sctp_packet *packet;
	struct list_head *pos;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	sctp_sackhdr_t sackh;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = (sctp_chunk_t *) event_arg;

	/* Note: This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *	while (cmds)
	 *		cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association. */
			asoc = cmd->obj.ptr;
			/* Register with the endpoint. */
			sctp_endpoint_add_asoc(ep, asoc);
			sctp_hash_established(asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			/* Delete the current association. */
			sctp_unhash_established(asoc);
			sctp_association_free(asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state. */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN. */
			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK. */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.ptr);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk,
						     GFP_ATOMIC, 0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.ptr, priority);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk. */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.ptr)
					sctp_free_chunk(cmd->obj.ptr);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.ptr)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.ptr));
			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer. */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "chunk_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer. */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "event_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
			break;

		case SCTP_CMD_REPLY:
			/* Send a chunk to our peer. */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer. */
			packet = cmd->obj.ptr;
			sctp_packet_transmit(packet);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_TRANSMIT:
			/* Kick start transmission. */
			error = sctp_outq_flush(&asoc->outqueue, 0);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing. */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing. */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!timeout)
				BUG();

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (timer_pending(timer) && del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.
			 */
			asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
			asoc->timeouts[cmd->obj.to] *= 2;
			if (asoc->timeouts[cmd->obj.to] >
			    asoc->max_init_timeo) {
				asoc->timeouts[cmd->obj.to] =
					asoc->max_init_timeo;
			}

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each(pos, &asoc->peer.transport_addr_list) {
				t = list_entry(pos, struct sctp_transport,
					       transports);
				sctp_retransmit_mark(&asoc->outqueue, t, 0);
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(cmd->obj.to));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk);
			break;

		case SCTP_CMD_COUNTER_INC:
			asoc->counters[cmd->obj.counter]++;
			break;

		case SCTP_CMD_COUNTER_RESET:
			asoc->counters[cmd->obj.counter] = 0;
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			SCTP_DEBUG_PRINTK("vtag mismatch!\n");
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport. */
			sctp_do_8_2_transport_strike(asoc,
						     cmd->obj.transport);
			break;

		case SCTP_CMD_TRANSPORT_RESET:
			t = cmd->obj.transport;
			sctp_cmd_transport_reset(commands, asoc, t);
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, asoc, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.u32;
			sackh.a_rwnd = 0;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_SACKH(&sackh));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet. */
			chunk->pdiscard = 1;
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
						   GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
					 GFP_ATOMIC);
			break;

		default:
			printk(KERN_WARNING "Impossible command: %u, %p\n",
			       cmd->verb, cmd->obj.ptr);
			break;
		};

		if (error)
			return error;
	}

	return error;

nomem:
	error = -ENOMEM;
	return error;
}
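The `--jgrimm` note at the top of the loop suggests replacing this one big `switch` with per-command handlers. A minimal sketch of that rework, assuming a hypothetical handler table keyed by `cmd->verb` (`sctp_cmd_handler_t`, `sctp_cmd_handlers[]`, and the `SCTP_CMD_LAST` bound are all invented for illustration; only `sctp_next_cmd()` and the command types come from this file):

/* Sketch only: the per-command handler dispatch suggested above.
 * Populating sctp_cmd_handlers[] (one function per case arm) is omitted.
 */
typedef int (sctp_cmd_handler_t)(sctp_endpoint_t *ep,
				 sctp_association_t *asoc,
				 sctp_cmd_t *cmd,
				 sctp_cmd_seq_t *commands);

static sctp_cmd_handler_t *sctp_cmd_handlers[SCTP_CMD_LAST + 1];

static int sketch_cmd_interpreter(sctp_endpoint_t *ep,
				  sctp_association_t *asoc,
				  sctp_cmd_seq_t *commands)
{
	sctp_cmd_t *cmd;
	int error = 0;

	while (NULL != (cmd = sctp_next_cmd(commands))) {
		error = (*sctp_cmd_handlers[cmd->verb])(ep, asoc, cmd,
							commands);
		if (error)
			break;
	}
	return error;
}

Each handler would absorb exactly one `case` arm, which keeps the dispatch loop flat and lets each command's logic be tested in isolation.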
/********************************************************************
 * 3rd Level Abstractions
 ********************************************************************/

/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep a
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 */
static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
					 struct sctp_transport *transport)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	asoc->overall_error_count++;

	if (transport->active &&
	    (transport->error_count++ >= transport->error_threshold)) {
		SCTP_DEBUG_PRINTK("transport_strike: transport "
				  "IP:%d.%d.%d.%d failed.\n",
				  NIPQUAD(transport->ipaddr.v4.sin_addr));
		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 */
	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}
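The E2 rule is plain exponential backoff with a ceiling: with RTO = 3 s and RTO.max = 60 s, successive expiries yield 6, 12, 24, 48, then 60 s (96 clamped to RTO.max). A standalone sketch of the same clamp, illustrative only (units are jiffies in the stack, plain integers here):

/* Sketch only: the E2 backoff rule in isolation. */
static unsigned long rto_backoff(unsigned long rto, unsigned long rto_max)
{
	rto *= 2;		/* E2: "back off the timer" */
	if (rto > rto_max)	/* rule C7: clamp at RTO.max */
		rto = rto_max;
	return rto;
}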
/* Worker routine to handle INIT command failure. */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
				 sctp_association_t *asoc)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						0, 0, 0, GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	/* FIXME: We need to handle data possibly either
	 * sent via COOKIE-ECHO bundling or just waiting in
	 * the transmit queue, if the user has enabled
	 * SEND_FAILED notifications.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  sctp_association_t *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  sctp_chunk_t *chunk)
{
	struct sctp_ulpevent *event;
	__u16 error = 0;

	switch (event_type) {
	case SCTP_EVENT_T_PRIMITIVE:
		if (SCTP_PRIMITIVE_ABORT == subtype.primitive)
			error = SCTP_ERROR_USER_ABORT;
		break;
	case SCTP_EVENT_T_CHUNK:
		if (chunk && (SCTP_CID_ABORT == chunk->chunk_hdr->type) &&
		    (ntohs(chunk->chunk_hdr->length) >=
		     (sizeof(struct sctp_chunkhdr) +
		      sizeof(struct sctp_errhdr)))) {
			error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
		}
		break;
	default:
		break;
	}

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						error, 0, 0, GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* FIXME: We need to handle data that could not be sent or was not
	 * acked, if the user has enabled SEND_FAILED notifications.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 sctp_association_t *asoc,
				 sctp_chunk_t *chunk,
				 sctp_init_chunk_t *peer_init,
				 int priority)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
			       sctp_source(chunk), peer_init, priority))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers. */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Start a heartbeat timer for each transport on the association.
	 * Hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);

		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Stop all heartbeat timers. */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer. */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* Helper function to handle the reception of a HEARTBEAT ACK. */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  sctp_association_t *asoc,
				  struct sctp_transport *t,
				  sctp_chunk_t *chunk)
{
	sctp_sender_hb_info_t *hbinfo;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 * The association's overall error count is also cleared.
	 */
	t->error_count = 0;
	t->asoc->overall_error_count = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if (!t->active)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 */
	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
}
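sctp_transport_update_rto() (defined elsewhere in the stack) feeds this RTT sample into the RFC 2960 6.3.1 estimator. A hedged sketch of that computation, with a hypothetical `struct rto_state` standing in for the transport's fields; alpha = 1/8 and beta = 1/4 as the RFC recommends, done here in integer arithmetic:

/* Sketch only: the RFC 2960 6.3.1 RTO rules (C2, C3, C6, C7) in
 * isolation.  Field names are hypothetical.
 */
struct rto_state {
	unsigned long srtt;	/* smoothed round-trip time */
	unsigned long rttvar;	/* round-trip time variation */
	unsigned long rto;
	int have_sample;
};

static void rto_update(struct rto_state *s, unsigned long rtt,
		       unsigned long rto_min, unsigned long rto_max)
{
	if (!s->have_sample) {
		/* C2: first measurement: SRTT = R, RTTVAR = R/2 */
		s->srtt = rtt;
		s->rttvar = rtt / 2;
		s->have_sample = 1;
	} else {
		/* C3: RTTVAR = (1 - beta)*RTTVAR + beta*|SRTT - R'| */
		unsigned long diff = (s->srtt > rtt) ? s->srtt - rtt
						     : rtt - s->srtt;
		s->rttvar = s->rttvar - (s->rttvar / 4) + (diff / 4);
		/* C3: SRTT = (1 - alpha)*SRTT + alpha*R' */
		s->srtt = s->srtt - (s->srtt / 8) + (rtt / 8);
	}

	/* RTO = SRTT + 4 * RTTVAR, clamped to [RTO.min, RTO.max] (C6/C7) */
	s->rto = s->srtt + 4 * s->rttvar;
	if (s->rto < rto_min)
		s->rto = rto_min;
	if (s->rto > rto_max)
		s->rto = rto_max;
}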
/* Helper function to do a transport reset at the expiry of the heartbeat
 * timer.
 */
static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);

	/* Mark one strike against a transport. */
	sctp_do_8_2_transport_strike(asoc, t);
}

/* Helper function to process the SACK command. */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 sctp_association_t *asoc,
				 sctp_sackhdr_t *sackh)
{
	int err;

	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
		/* There are no more TSNs awaiting SACK. */
		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	} else {
		/* Windows may have opened, so we need
		 * to check if we have DATA to transmit.
		 */
		err = sctp_outq_flush(&asoc->outqueue, 0);
	}

	return err;
}
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			      sctp_chunk_t *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_shutdown_transport(asoc);
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
	chunk->transport = t;
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_opt *sp = sctp_sk(sk);

	asoc->state = state;
	asoc->state_timestamp = jiffies;

	if ((SCTP_STATE_ESTABLISHED == asoc->state) ||
	    (SCTP_STATE_CLOSED == asoc->state)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (SCTP_SOCKET_UDP != sp->type)
			sk->state_change(sk);
	}

	/* Change the sk->state of a TCP-style socket that has successfully
	 * completed a connect() call.
	 */
	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
	    (SCTP_SOCKET_TCP == sp->type) && (SCTP_SS_CLOSED == sk->state))
		sk->state = SCTP_SS_ESTABLISHED;
}