Kirill Smelkov / linux · Commits

Commit f8436e9f
Authored Jul 24, 2004 by David S. Miller
    Merge conflicts with Yoshfuji's SNMP stats changes.

Parents: 57e3907f, a2ba8aa8
Showing 9 changed files with 355 additions and 280 deletions (+355, -280)
  include/net/sctp/command.h    |   +3    -0
  include/net/sctp/constants.h  |   +4    -0
  include/net/sctp/sm.h         |   +3    -0
  net/sctp/associola.c          |   +1    -0
  net/sctp/outqueue.c           |  +20    -2
  net/sctp/sm_make_chunk.c      |   +2    -3
  net/sctp/sm_sideeffect.c      |  +71    -0
  net/sctp/sm_statefuns.c       | +213  -275
  net/sctp/socket.c             |  +38    -0
include/net/sctp/command.h  (+3 / -0)

@@ -94,6 +94,9 @@ typedef enum {
     SCTP_CMD_REPORT_FWDTSN,      /* Report new cumulative TSN Ack. */
     SCTP_CMD_PROCESS_FWDTSN,     /* Skips were reported, so process further. */
     SCTP_CMD_CLEAR_INIT_TAG,     /* Clears association peer's inittag. */
+    SCTP_CMD_DEL_NON_PRIMARY,    /* Removes non-primary peer transports. */
+    SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
+    SCTP_CMD_FORCE_PRIM_RETRAN,  /* Forces retrans. over primary path. */
     SCTP_CMD_LAST
 } sctp_verb_t;
include/net/sctp/constants.h  (+4 / -0)

@@ -175,6 +175,10 @@ typedef enum {
     SCTP_IERROR_BAD_TAG,
     SCTP_IERROR_BIG_GAP,
     SCTP_IERROR_DUP_TSN,
+    SCTP_IERROR_HIGH_TSN,
+    SCTP_IERROR_IGNORE_TSN,
+    SCTP_IERROR_NO_DATA,
+    SCTP_IERROR_BAD_STREAM,
 } sctp_ierror_t;
include/net/sctp/sm.h  (+3 / -0)

@@ -322,6 +322,9 @@ void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
                                 const struct sctp_chunk *chunk,
                                 sctp_cmd_seq_t *commands,
                                 struct sctp_chunk *err_chunk);
+int sctp_eat_data(const struct sctp_association *asoc,
+                  struct sctp_chunk *chunk,
+                  sctp_cmd_seq_t *commands);

 /* 3rd level prototypes */
 __u32 sctp_generate_tag(const struct sctp_endpoint *);
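This prototype is the heart of the sm_statefuns.c rework further down: the DATA-chunk validation that each state function used to open-code is collected into one helper that queues its side effects on `commands` and reports a sctp_ierror_t code, leaving the caller to map that code onto a disposition. A condensed sketch of the caller side, distilled from the sctp_sf_eat_data_6_2() hunk below (the per-case comments are editorial, not part of the patch):

    /* Sketch of the caller side of sctp_eat_data(); see the
     * sctp_sf_eat_data_6_2() change in net/sctp/sm_statefuns.c
     * for the real code.
     */
    error = sctp_eat_data(asoc, chunk, commands);
    switch (error) {
    case SCTP_IERROR_NO_ERROR:      /* accepted; delivery/SACK already queued */
            break;
    case SCTP_IERROR_HIGH_TSN:      /* beyond the window: drop silently */
    case SCTP_IERROR_BAD_STREAM:    /* bad stream id: ERROR chunk already queued */
            goto discard_noforce;
    case SCTP_IERROR_DUP_TSN:       /* duplicate: discard but SACK immediately */
    case SCTP_IERROR_IGNORE_TSN:    /* no rwnd left: discard, still SACK */
            goto discard_force;
    case SCTP_IERROR_NO_DATA:       /* empty DATA chunk: ABORT already queued */
            goto consume;
    default:
            BUG();
    }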
net/sctp/associola.c  (+1 / -0)

@@ -1093,6 +1093,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
     case SCTP_STATE_ESTABLISHED:
     case SCTP_STATE_SHUTDOWN_PENDING:
     case SCTP_STATE_SHUTDOWN_RECEIVED:
+    case SCTP_STATE_SHUTDOWN_SENT:
         if ((asoc->rwnd > asoc->a_rwnd) &&
             ((asoc->rwnd - asoc->a_rwnd) >=
              min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu)))
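In other words, sctp_peer_needs_update() now also fires while the association is in SHUTDOWN-SENT, and the threshold it applies is min(sk_rcvbuf/2, pmtu). A quick worked example with hypothetical values (not from the patch): with sk_rcvbuf = 65536 and pmtu = 1500, min_t(__u32, 65536 >> 1, 1500) = min(32768, 1500) = 1500, so a window-update SACK is warranted only once rwnd has grown at least 1500 bytes past the last advertised a_rwnd.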
net/sctp/outqueue.c  (+20 / -2)

@@ -525,10 +525,10 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                                int rtx_timeout, int *start_timer)
 {
     struct list_head *lqueue;
-    struct list_head *lchunk;
+    struct list_head *lchunk, *lchunk1;
     struct sctp_transport *transport = pkt->transport;
     sctp_xmit_t status;
-    struct sctp_chunk *chunk;
+    struct sctp_chunk *chunk, *chunk1;
     struct sctp_association *asoc;
     int error = 0;

@@ -615,6 +615,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
              * the transmitted list.
              */
             list_add_tail(lchunk, &transport->transmitted);
+
+            /* Mark the chunk as ineligible for fast retransmit
+             * after it is retransmitted.
+             */
+            chunk->fast_retransmit = 0;
+
             *start_timer = 1;
             q->empty = 0;

@@ -622,6 +628,18 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
             lchunk = sctp_list_dequeue(lqueue);
             break;
         };
     }
+
+    /* If we are here due to a retransmit timeout or a fast
+     * retransmit and if there are any chunks left in the retransmit
+     * queue that could not fit in the PMTU sized packet, they need
+     * to be marked as ineligible for a subsequent fast retransmit.
+     */
+    if (rtx_timeout && !lchunk) {
+        list_for_each(lchunk1, lqueue) {
+            chunk1 = list_entry(lchunk1, struct sctp_chunk,
+                                transmitted_list);
+            chunk1->fast_retransmit = 0;
+        }
+    }

     return error;
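Both additions serve the same purpose: once a chunk has been retransmitted on the T3-rtx path, or was left behind because it did not fit in the PMTU-sized retransmission packet, its fast_retransmit flag is cleared so that a later run of duplicate SACKs cannot trigger a second, fast retransmission of the same data on top of the timer-driven one.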
net/sctp/sm_make_chunk.c  (+2 / -3)

@@ -1846,8 +1846,7 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
     if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
         goto clean_up;
     spin_lock_bh(&sctp_assocs_id_lock);
-    error = idr_get_new(&sctp_assocs_id,
-                        (void *)asoc,
-                        &assoc_id);
+    error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1,
+                              &assoc_id);
     spin_unlock_bh(&sctp_assocs_id_lock);
     if (error == -EAGAIN)
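Switching from idr_get_new() to idr_get_new_above(..., 1, ...) makes the allocator hand out association ids starting at 1, presumably so that 0 stays free to mean "no association / endpoint default", which is exactly how spp_assoc_id == 0 is interpreted by the net/sctp/socket.c changes below. For readers unfamiliar with the old (pre-idr_alloc) API, here is a minimal sketch of the preload-and-retry idiom this code relies on; the wrapper function is hypothetical and not part of the patch:

    /* Hypothetical wrapper illustrating the idr_pre_get() +
     * idr_get_new_above() retry-on-EAGAIN pattern used in
     * sctp_process_init(); ids start at 1 so 0 is never handed out.
     */
    static int assoc_id_alloc(struct idr *ids, spinlock_t *lock,
                              void *asoc, int *assoc_id)
    {
            int error;

    retry:
            if (unlikely(!idr_pre_get(ids, GFP_KERNEL)))
                    return -ENOMEM;         /* could not preload memory */

            spin_lock_bh(lock);
            error = idr_get_new_above(ids, asoc, 1, assoc_id);
            spin_unlock_bh(lock);

            if (error == -EAGAIN)           /* preloaded node raced away */
                    goto retry;

            return error;
    }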
net/sctp/sm_sideeffect.c  (+71 / -0)

@@ -529,6 +529,23 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
     }
 }

+/* Helper function to stop any pending T3-RTX timers */
+static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
+                                        struct sctp_association *asoc)
+{
+    struct sctp_transport *t;
+    struct list_head *pos;
+
+    list_for_each(pos, &asoc->peer.transport_addr_list) {
+        t = list_entry(pos, struct sctp_transport, transports);
+        if (timer_pending(&t->T3_rtx_timer) &&
+            del_timer(&t->T3_rtx_timer)) {
+            sctp_transport_put(t);
+        }
+    }
+}
+
 /* Helper function to update the heartbeat timer. */
 static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
                                      struct sctp_association *asoc,

@@ -749,6 +766,26 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
     return;
 }

+/* Helper function to remove the association non-primary peer
+ * transports.
+ */
+static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
+{
+    struct sctp_transport *t;
+    struct list_head *pos;
+    struct list_head *temp;
+
+    list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+        t = list_entry(pos, struct sctp_transport, transports);
+        if (!sctp_cmp_addr_exact(&t->ipaddr,
+                                 &asoc->peer.primary_addr)) {
+            sctp_assoc_del_peer(asoc, &t->ipaddr);
+        }
+    }
+
+    return;
+}
+
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.

@@ -1048,6 +1085,27 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
             if (cmd->obj.ptr)
                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                 SCTP_CHUNK(cmd->obj.ptr));
+
+            /* FIXME - Eventually come up with a cleaner way to
+             * enabling COOKIE-ECHO + DATA bundling during
+             * multihoming stale cookie scenarios, the following
+             * command plays with asoc->peer.retran_path to
+             * avoid the problem of sending the COOKIE-ECHO and
+             * DATA in different paths, which could result
+             * in the association being ABORTed if the DATA chunk
+             * is processed first by the server.  Checking the
+             * init error counter simply causes this command
+             * to be executed only during failed attempts of
+             * association establishment.
+             */
+            if ((asoc->peer.retran_path !=
+                 asoc->peer.primary_path) &&
+                (asoc->counters[SCTP_COUNTER_INIT_ERROR] > 0)) {
+                sctp_add_cmd_sf(commands,
+                                SCTP_CMD_FORCE_PRIM_RETRAN,
+                                SCTP_NULL());
+            }
+
             break;

         case SCTP_CMD_GEN_SHUTDOWN:

@@ -1282,6 +1340,19 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
         case SCTP_CMD_CLEAR_INIT_TAG:
             asoc->peer.i.init_tag = 0;
             break;
+        case SCTP_CMD_DEL_NON_PRIMARY:
+            sctp_cmd_del_non_primary(asoc);
+            break;
+        case SCTP_CMD_T3_RTX_TIMERS_STOP:
+            sctp_cmd_t3_rtx_timers_stop(commands, asoc);
+            break;
+        case SCTP_CMD_FORCE_PRIM_RETRAN:
+            t = asoc->peer.retran_path;
+            asoc->peer.retran_path = asoc->peer.primary_path;
+            error = sctp_outq_uncork(&asoc->outqueue);
+            local_cork = 0;
+            asoc->peer.retran_path = t;
+            break;
         default:
             printk(KERN_WARNING "Impossible command: %u, %p\n",
                    cmd->verb, cmd->obj.ptr);
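A note on SCTP_CMD_FORCE_PRIM_RETRAN, since the trick is easy to miss: the interpreter does not queue anything new, it temporarily points asoc->peer.retran_path at the primary path, uncorks the association's outqueue so the pending COOKIE-ECHO and any bundled DATA are flushed together over that single path, and then restores the saved retran_path. That is what keeps the COOKIE-ECHO and DATA from taking different transports in the multihomed stale-cookie case described in the FIXME above.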
net/sctp/sm_statefuns.c  (+213 / -275)

@@ -472,8 +472,6 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
      */
     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                     SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
-    sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
-                    SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
                     SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
     sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,

@@ -674,6 +672,15 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
     if (!sctp_vtag_verify(chunk, asoc))
         return sctp_sf_pdiscard(ep, asoc, type, arg, commands);

+    /* Reset init error count upon receipt of COOKIE-ACK,
+     * to avoid problems with the managemement of this
+     * counter in stale cookie situations when a transition back
+     * from the COOKIE-ECHOED state to the COOKIE-WAIT
+     * state is performed.
+     */
+    sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET,
+                    SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));
+
     /* RFC 2960 5.1 Normal Establishment of an Association
      *
      * E) Upon reception of the COOKIE ACK, endpoint "A" will move
...
@@ -1872,8 +1879,6 @@ sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
time_t
stale
;
time_t
stale
;
sctp_cookie_preserve_param_t
bht
;
sctp_cookie_preserve_param_t
bht
;
sctp_errhdr_t
*
err
;
sctp_errhdr_t
*
err
;
struct
list_head
*
pos
;
struct
sctp_transport
*
t
;
struct
sctp_chunk
*
reply
;
struct
sctp_chunk
*
reply
;
struct
sctp_bind_addr
*
bp
;
struct
sctp_bind_addr
*
bp
;
int
attempts
;
int
attempts
;
...
@@ -1920,20 +1925,27 @@ sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
     /* Clear peer's init_tag cached in assoc as we are sending a new INIT */
     sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());

+    /* Stop pending T3-rtx and heartbeat timers */
+    sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
+    sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
+
+    /* Delete non-primary peer ip addresses since we are transitioning
+     * back to the COOKIE-WAIT state
+     */
+    sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
+
+    /* If we've sent any data bundled with COOKIE-ECHO we will need to
+     * resend
+     */
+    sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
+                    SCTP_TRANSPORT(asoc->peer.primary_path));
+
     /* Cast away the const modifier, as we want to just
      * rerun it through as a sideffect.
      */
     sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_INC,
                     SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR));

-    /* If we've sent any data bundled with COOKIE-ECHO we need to
-     * resend.
-     */
-    list_for_each(pos, &asoc->peer.transport_addr_list) {
-        t = list_entry(pos, struct sctp_transport, transports);
-        sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(t));
-    }
-
     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                     SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
     sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -2321,12 +2333,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
                                         sctp_cmd_seq_t *commands)
 {
     struct sctp_chunk *chunk = arg;
-    sctp_datahdr_t *data_hdr;
-    struct sctp_chunk *err;
-    size_t datalen;
-    sctp_verb_t deliver;
-    int tmp;
-    __u32 tsn;
+    int error;

     if (!sctp_vtag_verify(chunk, asoc)) {
         sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -2334,157 +2341,21 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
         return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
     }

-    data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
-    skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
-
-    tsn = ntohl(data_hdr->tsn);
-    SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
-
-    [... remainder of the removed block: the inline ECN/CE handling, TSN-map
-         duplicate/high-TSN checks, rwnd and renege logic, the "No User Data"
-         abort, TSN reporting, MIB counting, stream-id validation and delivery
-         to the ULP; the same logic is added at the end of this file as the
-         new sctp_eat_data() helper ...]
-
+    error = sctp_eat_data(asoc, chunk, commands );
+    switch (error) {
+    case SCTP_IERROR_NO_ERROR:
+        break;
+    case SCTP_IERROR_HIGH_TSN:
+    case SCTP_IERROR_BAD_STREAM:
+        goto discard_noforce;
+    case SCTP_IERROR_DUP_TSN:
+    case SCTP_IERROR_IGNORE_TSN:
+        goto discard_force;
+    case SCTP_IERROR_NO_DATA:
+        goto consume;
+    default:
+        BUG();
+    }
+
     if (asoc->autoclose) {
         sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
@@ -2551,6 +2422,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
                         SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
     }
     return SCTP_DISPOSITION_DISCARD;
+
+consume:
+    return SCTP_DISPOSITION_CONSUME;
 }

 /*
@@ -2576,11 +2450,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
                                              sctp_cmd_seq_t *commands)
 {
     struct sctp_chunk *chunk = arg;
-    sctp_datahdr_t *data_hdr;
-    struct sctp_chunk *err;
-    size_t datalen;
-    int tmp;
-    __u32 tsn;
+    int error;

     if (!sctp_vtag_verify(chunk, asoc)) {
         sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -2588,110 +2458,8 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
         return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
     }

-    data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
-    skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
-
-    tsn = ntohl(data_hdr->tsn);
-    SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
-
-    [... remainder of the removed block repeats the inline ECN/CE handling,
-         TSN-map checks (here discarding via goto gen_shutdown), the
-         "No User Data" abort, TSN reporting, MIB counting and stream-id
-         validation that now live in sctp_eat_data() ...]
-
+    error = sctp_eat_data(asoc, chunk, commands );
+    if (error == SCTP_IERROR_NO_DATA)
+        goto consume;
+
     /* Go a head and force a SACK, since we are shutting down. */
-gen_shutdown:
     /* Implementor's Guide.
      *
      * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately

@@ -2707,6 +2475,8 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
         sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
                         SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
     }
+
+consume:
     return SCTP_DISPOSITION_CONSUME;
 }
@@ -4709,7 +4479,7 @@ struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
     num_blocks = ntohs(sack->num_gap_ack_blocks);
     num_dup_tsns = ntohs(sack->num_dup_tsns);
     len = sizeof(struct sctp_sackhdr);
-    len = (num_blocks + num_dup_tsns) * sizeof(__u32);
+    len += (num_blocks + num_dup_tsns) * sizeof(__u32);
     if (len > chunk->skb->len)
         return NULL;
@@ -4848,3 +4618,171 @@ void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
             sctp_chunk_free(err_chunk);
     }
 }
+
+/* Process a data chunk */
+int sctp_eat_data(const struct sctp_association *asoc,
+                  struct sctp_chunk *chunk,
+                  sctp_cmd_seq_t *commands)
+{
+    sctp_datahdr_t *data_hdr;
+    struct sctp_chunk *err;
+    size_t datalen;
+    sctp_verb_t deliver;
+    int tmp;
+    __u32 tsn;
+
+    data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
+    skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+
+    tsn = ntohl(data_hdr->tsn);
+    SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn);
+
+    /* ASSERT:  Now skb->data is really the user data. */
+
+    /* Process ECN based congestion.
+     *
+     * Since the chunk structure is reused for all chunks within
+     * a packet, we use ecn_ce_done to track if we've already
+     * done CE processing for this packet.
+     *
+     * We need to do ECN processing even if we plan to discard the
+     * chunk later.
+     */
+    if (!chunk->ecn_ce_done) {
+        struct sctp_af *af;
+        chunk->ecn_ce_done = 1;
+
+        af = sctp_get_af_specific(
+            ipver2af(chunk->skb->nh.iph->version));
+
+        if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
+            /* Do real work as sideffect. */
+            sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
+                            SCTP_U32(tsn));
+        }
+    }
+
+    tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
+    if (tmp < 0) {
+        /* The TSN is too high--silently discard the chunk and
+         * count on it getting retransmitted later.
+         */
+        return SCTP_IERROR_HIGH_TSN;
+    } else if (tmp > 0) {
+        /* This is a duplicate.  Record it.  */
+        sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
+        return SCTP_IERROR_DUP_TSN;
+    }
+
+    /* This is a new TSN.  */
+
+    /* Discard if there is no room in the receive window.
+     * Actually, allow a little bit of overflow (up to a MTU).
+     */
+    datalen = ntohs(chunk->chunk_hdr->length);
+    datalen -= sizeof(sctp_data_chunk_t);
+    deliver = SCTP_CMD_CHUNK_ULP;
+
+    /* Think about partial delivery. */
+    if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
+
+        /* Even if we don't accept this chunk there is
+         * memory pressure.
+         */
+        sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
+    }
+
+    /* Spill over rwnd a little bit.  Note: While allowed, this spill over
+     * seems a bit troublesome in that frag_point varies based on
+     * PMTU.  In cases, such as loopback, this might be a rather
+     * large spill over.
+     */
+    if (!asoc->rwnd || asoc->rwnd_over ||
+        (datalen > asoc->rwnd + asoc->frag_point)) {
+
+        /* If this is the next TSN, consider reneging to make
+         * room.   Note: Playing nice with a confused sender.  A
+         * malicious sender can still eat up all our buffer
+         * space and in the future we may want to detect and
+         * do more drastic reneging.
+         */
+        if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
+            (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+            SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
+            deliver = SCTP_CMD_RENEGE;
+        } else {
+            SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
+                              "rwnd: %d\n", tsn, datalen,
+                              asoc->rwnd);
+            return SCTP_IERROR_IGNORE_TSN;
+        }
+    }
+
+    /*
+     * Section 3.3.10.9 No User Data (9)
+     *
+     * Cause of error
+     * ---------------
+     * No User Data:  This error cause is returned to the originator of a
+     * DATA chunk if a received DATA chunk has no user data.
+     */
+    if (unlikely(0 == datalen)) {
+        err = sctp_make_abort_no_data(asoc, chunk, tsn);
+        if (err) {
+            sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+                            SCTP_CHUNK(err));
+        }
+        /* We are going to ABORT, so we might as well stop
+         * processing the rest of the chunks in the packet.
+         */
+        sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+                        SCTP_U32(SCTP_ERROR_NO_DATA));
+        SCTP_INC_STATS(SctpAborteds);
+        SCTP_DEC_STATS(SctpCurrEstab);
+        return SCTP_IERROR_NO_DATA;
+    }
+
+    /* If definately accepting the DATA chunk, record its TSN, otherwise
+     * wait for renege processing.
+     */
+    if (SCTP_CMD_CHUNK_ULP == deliver)
+        sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+    /* Note: Some chunks may get overcounted (if we drop) or overcounted
+     * if we renege and the chunk arrives again.
+     */
+    if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+        SCTP_INC_STATS(SctpInUnorderChunks);
+    else
+        SCTP_INC_STATS(SctpInOrderChunks);
+
+    /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+     *
+     * If an endpoint receive a DATA chunk with an invalid stream
+     * identifier, it shall acknowledge the reception of the DATA chunk
+     * following the normal procedure, immediately send an ERROR chunk
+     * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+     * and discard the DATA chunk.
+     */
+    if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+        err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+                                 &data_hdr->stream,
+                                 sizeof(data_hdr->stream));
+        if (err)
+            sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+                            SCTP_CHUNK(err));
+        return SCTP_IERROR_BAD_STREAM;
+    }
+
+    /* Send the data up to the user.  Note:  Schedule  the
+     * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
+     * chunk needs the updated rwnd.
+     */
+    sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
+
+    return SCTP_IERROR_NO_ERROR;
+}
net/sctp/socket.c  (+38 / -0)

@@ -1697,6 +1697,32 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
     if (copy_from_user(&params, optval, optlen))
         return -EFAULT;

+    /*
+     * API 7. Socket Options (setting the default value for the endpoint)
+     * All options that support specific settings on an association by
+     * filling in either an association id variable or a sockaddr_storage
+     * SHOULD also support setting of the same value for the entire endpoint
+     * (i.e. future associations). To accomplish this the following logic is
+     * used when setting one of these options:
+     * c) If neither the sockaddr_storage or association identification is
+     *    set i.e. the sockaddr_storage is set to all 0's (INADDR_ANY) and
+     *    the association identification is 0, the settings are a default
+     *    and to be applied to the endpoint (all future associations).
+     */
+
+    /* update default value for endpoint (all future associations) */
+    if (!params.spp_assoc_id &&
+        sctp_is_any((union sctp_addr *)&params.spp_address)) {
+        if (params.spp_hbinterval)
+            sctp_sk(sk)->paddrparam.spp_hbinterval =
+                                        params.spp_hbinterval;
+        if (sctp_max_retrans_path)
+            sctp_sk(sk)->paddrparam.spp_pathmaxrxt =
+                                        params.spp_pathmaxrxt;
+        return 0;
+    }
+
     trans = sctp_addr_id2transport(sk, &params.spp_address,
                                    params.spp_assoc_id);
     if (!trans)

@@ -2864,6 +2890,17 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
     if (copy_from_user(&params, optval, len))
         return -EFAULT;

+    /* If no association id is specified retrieve the default value
+     * for the endpoint that will be used for all future associations
+     */
+    if (!params.spp_assoc_id &&
+        sctp_is_any((union sctp_addr *)&params.spp_address)) {
+        params.spp_hbinterval = sctp_sk(sk)->paddrparam.spp_hbinterval;
+        params.spp_pathmaxrxt = sctp_sk(sk)->paddrparam.spp_pathmaxrxt;
+
+        goto done;
+    }
+
     trans = sctp_addr_id2transport(sk, &params.spp_address,
                                    params.spp_assoc_id);
     if (!trans)

@@ -2883,6 +2920,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
      */
     params.spp_pathmaxrxt = trans->error_threshold;

+done:
     if (copy_to_user(optval, &params, len))
         return -EFAULT;
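The user-visible effect: an application can now program endpoint-wide defaults for the peer-address parameters by passing an all-zero spp_address and spp_assoc_id == 0, instead of having to name an existing transport. A hedged userspace sketch follows; it assumes the 2004-era lksctp struct sctp_paddrparams with just the spp_address, spp_hbinterval, spp_pathmaxrxt and spp_assoc_id fields, and skips error handling:

    /* Sketch only: set endpoint defaults that future associations on
     * this socket will inherit.  Field and option names match the patch;
     * the surrounding program is illustrative.
     */
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    static int set_endpoint_defaults(int sd)
    {
            struct sctp_paddrparams params;

            memset(&params, 0, sizeof(params)); /* spp_address = INADDR_ANY,
                                                 * spp_assoc_id = 0 */
            params.spp_hbinterval = 5000;       /* heartbeat every 5000 ms */
            params.spp_pathmaxrxt = 5;          /* per-path max retransmits */

            return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
                              &params, sizeof(params));
    }

Reading the same defaults back goes through the matching getsockopt() path added above, i.e. the !spp_assoc_id branch that jumps to the new done: label.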