Commit b91ddd84
Authored Aug 30, 2007 by David S. Miller
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/vxy/lksctp-dev
Parents: 05bb1fad, cb243a1a
Changes: showing 10 changed files with 190 additions and 77 deletions (+190 / -77)
  include/net/sctp/sm.h         +1   -1
  include/net/sctp/structs.h    +1   -0
  include/net/sctp/ulpqueue.h   +1   -0
  net/sctp/associola.c          +6   -1
  net/sctp/outqueue.c           +7   -0
  net/sctp/sm_make_chunk.c      +77  -35
  net/sctp/sm_sideeffect.c      +6   -2
  net/sctp/sm_statefuns.c       +26  -25
  net/sctp/socket.c             +3   -0
  net/sctp/ulpqueue.c           +62  -13
include/net/sctp/sm.h

@@ -214,7 +214,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
 					const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
 					const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
 				   const struct sctp_chunk *,
 				   const size_t hint);
include/net/sctp/structs.h

@@ -726,6 +726,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
 			  struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
 				 const struct sctp_association *,
 				 struct sock *);
include/net/sctp/ulpqueue.h

@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);

 #endif /* __sctp_ulpqueue_h__ */
net/sctp/associola.c

@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 		break;

 	case SCTP_TRANSPORT_DOWN:
-		transport->state = SCTP_INACTIVE;
+		/* if the transort was never confirmed, do not transition it
+		 * to inactive state.
+		 */
+		if (transport->state != SCTP_UNCONFIRMED)
+			transport->state = SCTP_INACTIVE;
+
 		spc_state = SCTP_ADDR_UNREACHABLE;
 		break;
net/sctp/outqueue.c

@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		 */
 		if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
 		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+			/* If this chunk was sent less then 1 rto ago, do not
+			 * retransmit this chunk, but give the peer time
+			 * to acknowlege it.
+			 */
+			if ((jiffies - chunk->sent_at) < transport->rto)
+				continue;
+
 			/* RFC 2960 6.2.1 Processing a Received SACK
 			 *
 			 * C) Any time a DATA chunk is marked for
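The guard added above ages the chunk with plain unsigned subtraction on jiffies, which stays correct across counter wrap-around. A rough stand-alone sketch of that idiom, with made-up tick values and a hypothetical helper rather than the kernel's jiffies/rto fields:

#include <stdio.h>

/* Hypothetical stand-in for the jiffies-based age check above: with
 * unsigned arithmetic, (now - sent_at) yields the elapsed ticks even if
 * the counter wrapped between sent_at and now.
 */
static int sent_less_than_rto_ago(unsigned long now, unsigned long sent_at,
				  unsigned long rto)
{
	return (now - sent_at) < rto;
}

int main(void)
{
	/* Counter wrapped: sent just before the wrap, "now" just after it. */
	unsigned long sent_at = (unsigned long)-5;	/* 5 ticks before wrap */
	unsigned long now = 10;				/* 10 ticks after wrap */
	unsigned long rto = 100;

	/* Elapsed is 15 ticks, well under the 100-tick RTO, so the chunk
	 * would be skipped to give the peer time to acknowledge it.
	 */
	printf("elapsed=%lu skip=%d\n", now - sent_at,
	       sent_less_than_rto_ago(now, sent_at, rto));
	return 0;
}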
net/sctp/sm_make_chunk.c

@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
  * abort chunk.
  */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-		     const void *payload, size_t paylen)
+		     size_t paylen)
 {
 	sctp_errhdr_t err;
 	__u16 len;

@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
 	len = sizeof(sctp_errhdr_t) + paylen;
 	err.length  = htons(len);
 	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-	sctp_addto_chunk(chunk, paylen, payload);
 }

 /* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 	/* Put the tsn back into network byte order.  */
 	payload = htonl(tsn);
-	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-			sizeof(payload));
+	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+	sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);

 	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
 	 *
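The hunk above shows the calling convention this series converts the whole file to: sctp_init_cause() now writes only the cause header, whose length field must still account for the payload that will follow, and the caller appends that payload with a separate sctp_addto_chunk() (or, where the bytes extend the current parameter, sctp_addto_param()) call. A rough user-space mock of that split, with invented buffer handling (mock_chunk, mock_init_cause, mock_addto_chunk) standing in for the kernel's sk_buff/chunk machinery:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htons, htonl */

/* Hypothetical flat error-chunk buffer standing in for a struct sctp_chunk. */
struct mock_chunk {
	uint8_t data[256];
	size_t  len;
};

struct mock_errhdr {
	uint16_t cause;
	uint16_t length;	/* covers header + payload, like sctp_errhdr_t */
};

/* Mirrors the role of the new sctp_init_cause(): write only the cause
 * header, but make its length field include the payload the caller will
 * append afterwards.
 */
static void mock_init_cause(struct mock_chunk *c, uint16_t cause, size_t paylen)
{
	struct mock_errhdr err = {
		.cause  = htons(cause),
		.length = htons((uint16_t)(sizeof(err) + paylen)),
	};

	memcpy(c->data + c->len, &err, sizeof(err));
	c->len += sizeof(err);
}

/* Mirrors the role of sctp_addto_chunk() at these call sites: append payload. */
static void mock_addto_chunk(struct mock_chunk *c, const void *data, size_t len)
{
	memcpy(c->data + c->len, data, len);
	c->len += len;
}

int main(void)
{
	struct mock_chunk chunk = { .len = 0 };
	uint32_t tsn = htonl(0x01020304);	/* payload, as in sctp_make_abort_no_data() */

	/* New two-step call pattern from the diff above. */
	mock_init_cause(&chunk, 0x000A /* stand-in cause code, not the real value */,
			sizeof(tsn));
	mock_addto_chunk(&chunk, &tsn, sizeof(tsn));

	printf("chunk now carries %zu bytes of cause data\n", chunk.len);
	return 0;
}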
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
 		goto err_copy;
 	}

-	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+	sctp_addto_chunk(retval, paylen, payload);

 	if (paylen)
 		kfree(payload);

@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
 	struct sctp_paramhdr phdr;

 	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-					+ sizeof(sctp_chunkhdr_t));
+					+ sizeof(sctp_paramhdr_t));
 	if (!retval)
 		goto end;

-	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+					+ sizeof(sctp_paramhdr_t));

 	phdr.type = htons(chunk->chunk_hdr->type);
 	phdr.length = chunk->chunk_hdr->length;
-	sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+	sctp_addto_chunk(retval, paylen, payload);
+	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);

 end:
 	return retval;

@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
 	if (!retval)
 		goto nodata;

-	sctp_init_cause(retval, cause_code, payload, paylen);
+	sctp_init_cause(retval, cause_code, paylen);
+	sctp_addto_chunk(retval, paylen, payload);

 nodata:
 	return retval;

@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	void *target;
 	void *padding;
 	int chunklen = ntohs(chunk->chunk_hdr->length);
-	int padlen = chunklen % 4;
+	int padlen = WORD_ROUND(chunklen) - chunklen;

 	padding = skb_put(chunk->skb, padlen);
 	target = skb_put(chunk->skb, len);
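The padlen change above also corrects the padding math: chunklen % 4 yields the overshoot past the last 4-byte boundary, not the number of bytes needed to reach the next one. A small stand-alone check, assuming WORD_ROUND() rounds a length up to a multiple of 4 (which is how the call site uses it):

#include <stdio.h>

/* Assumed behaviour of the kernel's WORD_ROUND(): round up to a multiple of 4. */
#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
	int chunklen;

	/* Compare the old and new padding computations for a few chunk lengths. */
	for (chunklen = 4; chunklen <= 8; chunklen++) {
		int old_padlen = chunklen % 4;				/* pre-patch  */
		int new_padlen = WORD_ROUND(chunklen) - chunklen;	/* post-patch */

		printf("chunklen=%d old=%d new=%d\n", chunklen, old_padlen, new_padlen);
	}
	/* e.g. chunklen=5: the old code pads 1 byte where 3 are needed; the new code pads 3. */
	return 0;
}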
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
 	return target;
 }

+/* Append bytes to the end of a parameter.  Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+	void *target;
+	int chunklen = ntohs(chunk->chunk_hdr->length);
+
+	target = skb_put(chunk->skb, len);
+
+	memcpy(target, data, len);
+
+	/* Adjust the chunk length field.  */
+	chunk->chunk_hdr->length = htons(chunklen + len);
+	chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+	return target;
+}
+
 /* Append bytes from user space to the end of a chunk.  Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1174,25 +1196,36 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+	struct sctp_datamsg *msg;
+	struct sctp_chunk *lchunk;
+	struct sctp_stream *stream;
 	__u16 ssn;
 	__u16 sid;

 	if (chunk->has_ssn)
 		return;

-	/* This is the last possible instant to assign a SSN. */
-	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-		ssn = 0;
-	} else {
-		sid = ntohs(chunk->subh.data_hdr->stream);
-		if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-			ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-		else
-			ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-	}
+	/* All fragments will be on the same stream */
+	sid = ntohs(chunk->subh.data_hdr->stream);
+	stream = &chunk->asoc->ssnmap->out;

-	chunk->subh.data_hdr->ssn = htons(ssn);
-	chunk->has_ssn = 1;
+	/* Now assign the sequence number to the entire message.
+	 * All fragments must have the same stream sequence number.
+	 */
+	msg = chunk->msg;
+	list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			ssn = 0;
+		} else {
+			if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+				ssn = sctp_ssn_next(stream, sid);
+			else
+				ssn = sctp_ssn_peek(stream, sid);
+		}
+
+		lchunk->subh.data_hdr->ssn = htons(ssn);
+		lchunk->has_ssn = 1;
+	}
 }

 /* Helper function to assign a TSN if needed.  This assumes that both
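The reworked sctp_chunk_assign_ssn() now walks every fragment of the data message so that all fragments carry the same stream sequence number, and only the last fragment advances the per-stream counter. A rough user-space model of that peek/advance behaviour, with an invented counter array in place of the ssnmap; the real sctp_ssn_peek()/sctp_ssn_next() helpers are assumed to read, respectively read-and-post-increment, the per-stream counter:

#include <stdint.h>
#include <stdio.h>

#define NSTREAMS 4

static uint16_t ssn_out[NSTREAMS];	/* mock per-stream SSN counters */

static uint16_t mock_ssn_peek(uint16_t sid) { return ssn_out[sid]; }
static uint16_t mock_ssn_next(uint16_t sid) { return ssn_out[sid]++; }

int main(void)
{
	uint16_t sid = 2, ssn = 0;
	int frag, nfrags = 3;

	/* Assign the same SSN to every fragment of one ordered message;
	 * only the last fragment consumes the counter.
	 */
	for (frag = 0; frag < nfrags; frag++) {
		int last = (frag == nfrags - 1);

		ssn = last ? mock_ssn_next(sid) : mock_ssn_peek(sid);
		printf("fragment %d on stream %u gets ssn %u\n",
		       frag, (unsigned)sid, (unsigned)ssn);
	}

	printf("next message on stream %u starts at ssn %u\n",
	       (unsigned)sid, (unsigned)mock_ssn_peek(sid));
	return 0;
}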
@@ -1466,7 +1499,8 @@ struct sctp_association *sctp_unpack_cookie(
 		__be32 n = htonl(usecs);

-		sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, &n,
-				sizeof(n));
+		sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
+				sizeof(n));
+		sctp_addto_chunk(*errp, sizeof(n), &n);
 		*error = -SCTP_IERROR_STALE_COOKIE;
 	} else
 		*error = -SCTP_IERROR_NOMEM;

@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
 		report.num_missing = htonl(1);
 		report.type = paramtype;
-		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, &report,
-				sizeof(report));
+		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
+				sizeof(report));
+		sctp_addto_chunk(*errp, sizeof(report), &report);
 	}

 	/* Stop processing this chunk. */

@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, 0);

 	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);

 	/* Stop processing this chunk. */
 	return 0;

@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, payload_len);

 	if (*errp) {
-		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-				sizeof(error));
-		sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+				sizeof(error) + sizeof(sctp_paramhdr_t));
+		sctp_addto_chunk(*errp, sizeof(error), error);
+		sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
 	}

 	return 0;

@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 	if (!*errp)
 		*errp = sctp_make_op_error_space(asoc, chunk, len);

-	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, param.v, len);
+	if (*errp) {
+		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+		sctp_addto_chunk(*errp, len, param.v);
+	}

 	/* Stop processing this chunk. */
 	return 0;

@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 		*errp = sctp_make_op_error_space(asoc, chunk,
 					ntohs(chunk->chunk_hdr->length));

-		if (*errp)
+		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
+		}
 		break;
 	case SCTP_PARAM_ACTION_SKIP:

@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v,
 					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
 		} else {
 			/* If there is no memory for generating the ERROR
 			 * report as specified, an ABORT will be triggered

@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 	 * VIOLATION error.  We build the ERROR chunk here and let the normal
 	 * error handling code build and send the packet.
 	 */
-	if (param.v < (void *)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+	if (param.v != (void *)chunk->chunk_end) {
 		sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
 		return 0;
 	}
net/sctp/sm_sideeffect.c

@@ -1013,6 +1013,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 		break;

 	case SCTP_DISPOSITION_VIOLATION:
-		printk(KERN_ERR "sctp protocol violation state %d "
-		       "chunkid %d\n", state, subtype.chunk);
+		if (net_ratelimit())
+			printk(KERN_ERR "sctp protocol violation state %d "
+			       "chunkid %d\n", state, subtype.chunk);
 		break;

@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			/* Move the Cumulattive TSN Ack ahead. */
 			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

+			/* purge the fragmentation queue */
+			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
 			/* Abort any in progress partial delivery. */
 			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 			break;
net/sctp/sm_statefuns.c

@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
-	struct sock *sk;
 	int len;

 	/* 6.10 Bundling

@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
 		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);

-	sk = ep->base.sk;
-	/* If the endpoint is not listening or if the number of associations
-	 * on the TCP-style socket exceed the max backlog, respond with an
-	 * ABORT.
-	 */
-	if (!sctp_sstate(sk, LISTENING) ||
-	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
 	 * Tag.
 	 */

@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	struct sctp_ulpevent *ev, *ai_ev = NULL;
 	int error = 0;
 	struct sctp_chunk *err_chk_p;
+	struct sock *sk;

 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.

@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);

+	/* If the endpoint is not listening or if the number of associations
+	 * on the TCP-style socket exceed the max backlog, respond with an
+	 * ABORT.
+	 */
+	sk = ep->base.sk;
+	if (!sctp_sstate(sk, LISTENING) ||
+	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
 	/* "Decode" the chunk.  We have no optional parameters so we
 	 * are in good shape.
 	 */

@@ -1032,6 +1031,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 	/* This should never happen, but lets log it if so.  */
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
+			if (net_ratelimit())
 			printk(KERN_WARNING
 			       "%s association %p could not find address "
 			       NIP6_FMT "\n",

@@ -1039,6 +1039,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 			       asoc, NIP6(from_addr.v6.sin6_addr));
 		} else {
+			if (net_ratelimit())
 			printk(KERN_WARNING
 			       "%s association %p could not find address "
 			       NIPQUAD_FMT "\n",

@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 		}

@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
 		}
net/sctp/socket.c

@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 	 * The function sctp_get_port_local() does duplicate address
 	 * detection.
 	 */
+	addr->v4.sin_port = htons(snum);
 	if ((ret = sctp_get_port_local(sk, addr))) {
 		if (ret == (long) sk) {
 			/* This endpoint has a conflicting address. */

@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}

 	/* Return if we are already listening. */

@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}

 	if (sctp_sstate(sk, LISTENING))
net/sctp/ulpqueue.c

@@ -659,6 +659,46 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 	return retval;
 }

+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less then
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an an incoming chunk.
  */
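The new sctp_ulpq_reasm_flushtsn() leans on TSN_lte() to decide which queued fragments fall at or before the new cumulative TSN point. TSNs are 32-bit serial numbers, so the comparison has to tolerate wrap-around; a stand-alone sketch of that style of comparison (assuming TSN_lte() is the usual signed-difference serial-number test):

#include <stdint.h>
#include <stdio.h>

/* Serial-number "less than or equal" over a 32-bit space: stays correct even
 * when the TSN counter has wrapped, as long as the two values are within 2^31.
 */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	uint32_t fwd_tsn = 5;				/* cumulative TSN point just after a wrap */
	uint32_t queued[] = { 0xfffffffe, 3, 9 };	/* TSNs of queued fragments */
	int i;

	/* Fragments at or before fwd_tsn would be flushed from the queue. */
	for (i = 0; i < 3; i++)
		printf("tsn %u: %s\n", queued[i],
		       tsn_lte(queued[i], fwd_tsn) ? "flush" : "keep");
	return 0;
}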
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;

@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;

-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far?  */
+		if (csid > sid)
 			break;

-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough?  */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;

 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on.  */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event.  */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event.  */
+		__skb_queue_tail(&temp, pos);
 	}

 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }

-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forwared TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;

@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }