Commit a276e83e authored by Jon Grimm

[SCTP] Renege to make room for CTSN+1 chunk.

If our receive buffer is full, but this is the most important TSN 
to receive, make room by reneging less important TSNs.  Only renege
if there is a gap and this is the next TSN to fit in the gap.  
parent 8ae0801a
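
Restated outside the kernel, the acceptance rule this patch adds is compact. A minimal userspace sketch (the helper below is hypothetical; in the patch the inputs come from sctp_tsnmap_get_ctsn() and sctp_tsnmap_has_gap()):

#include <stdio.h>

/* Renege only when the map has a gap and this TSN is exactly CTSN + 1,
 * i.e. the one chunk whose delivery can advance the cumulative ack point.
 */
static int should_renege(unsigned int ctsn, int has_gap, unsigned int tsn)
{
	return has_gap && (ctsn + 1 == tsn);
}

int main(void)
{
	printf("%d\n", should_renege(100, 1, 101)); /* 1: fills the gap */
	printf("%d\n", should_renege(100, 1, 103)); /* 0: not the next TSN */
	printf("%d\n", should_renege(100, 0, 101)); /* 0: no gap, no gain */
	return 0;
}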
@@ -68,7 +68,6 @@ typedef enum {
 	SCTP_CMD_INIT_RESTART,	/* High level, do init timer work. */
 	SCTP_CMD_INIT_FAILED,	/* High level, do init failure work. */
 	SCTP_CMD_REPORT_DUP,	/* Report a duplicate TSN. */
-	SCTP_CMD_REPORT_BIGGAP,	/* Narc on a TSN (it was too high). */
 	SCTP_CMD_STRIKE,	/* Mark a strike against a transport. */
 	SCTP_CMD_TRANSMIT,	/* Transmit the outqueue. */
 	SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
@@ -86,8 +85,8 @@ typedef enum {
 	SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
 	SCTP_CMD_SETUP_T2,	/* Hi-level, setup T2-shutdown parms. */
 	SCTP_CMD_RTO_PENDING,	/* Set transport's rto_pending. */
-	SCTP_CMD_CHUNK_PD,	/* Partial data delivery considerations. */
+	SCTP_CMD_PART_DELIVER,	/* Partial data delivery considerations. */
+	SCTP_CMD_RENEGE,	/* Renege data on an association. */
 	SCTP_CMD_LAST
 } sctp_verb_t;
......
@@ -6,13 +6,13 @@
  * These are the definitions needed for the tsnmap type.  The tsnmap is used
  * to track out of order TSNs received.
  *
- * The SCTP reference implementation is free software; 
+ * The SCTP reference implementation is free software;
  * you can redistribute it and/or modify it under the terms of
  * the GNU General Public License as published by
  * the Free Software Foundation; either version 2, or (at your option)
  * any later version.
  *
- * the SCTP reference implementation is distributed in the hope that it
+ * The SCTP reference implementation is distributed in the hope that it
  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  *                 ************************
  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -23,12 +23,17 @@
  * the Free Software Foundation, 59 Temple Place - Suite 330,
  * Boston, MA 02111-1307, USA.
  *
- * Please send any bug reports or fixes you make to one of the
- * following email addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <lksctp-developers@lists.sourceforge.net>
  *
- * Jon Grimm <jgrimm@us.ibm.com>
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
+ * Or submit a bug report through the following website:
+ *    http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ *    Jon Grimm             <jgrimm@us.ibm.com>
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Karl Knutson          <karl@athena.chicago.il.us>
  *
  * Any bugs reported given to us we will try to fix... any fixes shared will
  * be incorporated into the next SCTP release.
@@ -153,15 +158,18 @@ static inline __u32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map)
 	return map->dup_tsns;
 }
 
-/* Mark a duplicate TSN.  Note: we limit how many we are willing to
- * store and consequently report.
+/* Mark a duplicate TSN.  Note: limit the storage of duplicate TSN
+ * information.
  */
 static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)
 {
 	if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS)
-		map->dup_tsns[map->num_dup_tsns++] = tsn;
+		map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);
 }
 
+/* Renege a TSN that was seen.  */
+void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
+
 /* Is there a gap in the TSN map? */
 int sctp_tsnmap_has_gap(const struct sctp_tsnmap *);
 
@@ -176,6 +184,3 @@ int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *,
 			     struct sctp_tsnmap_iter *, __u16 *start, __u16 *end);
 
 #endif /* __sctp_tsnmap_h__ */
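
A side note on the mark_dup hunk above: duplicate TSNs are now stored already converted with htonl(), presumably so a SACK builder can copy the dup_tsns array straight into the outgoing chunk. A userspace sketch of that pattern (the struct and helper names are illustrative, not the kernel's):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#define MAX_DUP_TSNS 16			/* stand-in for SCTP_MAX_DUP_TSNS */

struct dup_store {
	uint32_t dup_tsns[MAX_DUP_TSNS];	/* kept in network byte order */
	uint16_t num_dup_tsns;
};

/* Convert once at record time, as the patched sctp_tsnmap_mark_dup() does. */
static void mark_dup(struct dup_store *s, uint32_t tsn)
{
	if (s->num_dup_tsns < MAX_DUP_TSNS)
		s->dup_tsns[s->num_dup_tsns++] = htonl(tsn);
}

/* A SACK builder can then append the array with a straight copy. */
static size_t copy_dups(const struct dup_store *s, uint8_t *wire)
{
	size_t bytes = s->num_dup_tsns * sizeof(uint32_t);

	memcpy(wire, s->dup_tsns, bytes);
	return bytes;
}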
@@ -66,6 +66,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, int);
 /* Add a new event for propogation to the ULP.  */
 int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
 
+/* Renege previously received chunks.  */
+void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, int);
+
 /* Perform partial delivery.  */
 void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, int);
......
@@ -1004,8 +1004,8 @@ void sctp_assoc_rwnd_increase(sctp_association_t *asoc, int len)
 	    ((asoc->rwnd - asoc->a_rwnd) >=
 	     min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu))) {
 		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
-				  "rwnd: %u a_rwnd: %u\n",
-				  __FUNCTION__, asoc, asoc->rwnd, asoc->a_rwnd);
+				  "rwnd: %u a_rwnd: %u\n", __FUNCTION__,
+				  asoc, asoc->rwnd, asoc->a_rwnd);
 		sack = sctp_make_sack(asoc);
 		if (!sack)
 			return;
......
@@ -145,7 +145,7 @@ sctp_disposition_t sctp_sf_do_4_C(const sctp_endpoint_t *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
 			SCTP_STATE(SCTP_STATE_CLOSED));
 
 	SCTP_INC_STATS(SctpShutdowns);
 	SCTP_DEC_STATS(SctpCurrEstab);
@@ -682,7 +682,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
 	return SCTP_DISPOSITION_CONSUME;
-
 nomem:
 	return SCTP_DISPOSITION_NOMEM;
 }
@@ -2274,7 +2273,6 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 	 * that the value in the Verification Tag field of the
 	 * received SCTP packet matches its own Tag.
 	 */
-
 	if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
 				SCTP_NULL());
@@ -2339,21 +2337,29 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 		/* Even if we don't accept this chunk there is
 		 * memory pressure.
 		 */
-		sctp_add_cmd_sf(commands, SCTP_CMD_CHUNK_PD, SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
 	}
 
 	/* Spill over rwnd a little bit.  Note: While allowed, this spill over
 	 * seems a bit troublesome in that frag_point varies based on
 	 * PMTU.  In cases, such as loopback, this might be a rather
 	 * large spill over.
 	 */
 	if (asoc->rwnd_over || (datalen > asoc->rwnd + asoc->frag_point)) {
-		/* There is absolutely no room, but this is the most
-		 * important tsn that we are waiting on, try to
-		 * to partial deliver or renege to make room.
+		/* If this is the next TSN, consider reneging to make
+		 * room.  Note: Playing nice with a confused sender.  A
+		 * malicious sender can still eat up all our buffer
+		 * space and in the future we may want to detect and
+		 * do more drastic reneging.
 		 */
-		if ((sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
-			deliver = SCTP_CMD_CHUNK_PD;
+		if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
+		    (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+			SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
+			deliver = SCTP_CMD_RENEGE;
 		} else {
 			SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, "
-					  "rwnd: %d\n", tsn, datalen,
+					  "rwnd: %d\n", tsn, datalen,
 					  asoc->rwnd);
 			goto discard_force;
 		}
@@ -2379,21 +2385,23 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
 		SCTP_INC_STATS(SctpAborteds);
-		SCTP_INC_STATS(SctpCurrEstab);
+		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_CONSUME;
 	}
 
 	/* If definately accepting the DATA chunk, record its TSN, otherwise
-	 * wait for renege processing. 
+	 * wait for renege processing.
 	 */
-	if (deliver != SCTP_CMD_CHUNK_PD) {
+	if (SCTP_CMD_CHUNK_ULP == deliver)
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
-		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-			SCTP_INC_STATS(SctpInUnorderChunks);
-		else
-			SCTP_INC_STATS(SctpInOrderChunks);
-	}
+
+	/* Note: Some chunks may get overcounted (if we drop) or overcounted
+	 * if we renege and the chunk arrives again.
+	 */
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+		SCTP_INC_STATS(SctpInUnorderChunks);
+	else
+		SCTP_INC_STATS(SctpInOrderChunks);
 
 	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
 	 *
@@ -2592,7 +2600,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
 		sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
 		SCTP_INC_STATS(SctpAborteds);
-		SCTP_INC_STATS(SctpCurrEstab);
+		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_CONSUME;
 	}
......
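
For reference, the guard around the renege decision in sctp_sf_eat_data_6_2() fires only when the window is genuinely exhausted: either rwnd is already overrun, or the chunk exceeds rwnd plus the frag_point spill-over allowance. A one-function sketch of that predicate (hypothetical, mirroring the if() in the hunk above):

#include <stdio.h>

/* True when there is no room left even after the allowed spill-over. */
static int no_room(unsigned int rwnd_over, unsigned int datalen,
		   unsigned int rwnd, unsigned int frag_point)
{
	return rwnd_over || datalen > rwnd + frag_point;
}

int main(void)
{
	/* 1500-byte chunk, 1000 bytes of rwnd, 1452-byte frag_point: fits
	 * inside the spill-over, so the renege path is not consulted.
	 */
	printf("%d\n", no_room(0, 1500, 1000, 1452));	/* 0 */
	/* A 3000-byte chunk exceeds rwnd plus the spill-over. */
	printf("%d\n", no_room(0, 3000, 1000, 1452));	/* 1 */
	return 0;
}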
@@ -385,3 +385,23 @@ static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off,
 		}
 	}
 }
+
+/* Renege that we have seen a TSN.  */
+void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn)
+{
+	__s32 gap;
+
+	if (TSN_lt(tsn, map->base_tsn))
+		return;
+	if (!TSN_lt(tsn, map->base_tsn + map->len + map->len))
+		return;
+
+	/* Assert: TSN is in range.  */
+	gap = tsn - map->base_tsn;
+
+	/* Pretend we never saw the TSN.  */
+	if (gap < map->len)
+		map->tsn_map[gap] = 0;
+	else
+		map->overflow_map[gap - map->len] = 0;
+}
@@ -655,32 +655,119 @@ static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 	return event;
 }
 
+/* Renege 'needed' bytes from the ordering queue.  */
+static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+{
+	__u16 freed = 0;
+	__u32 tsn;
+	struct sk_buff *skb;
+	struct sctp_ulpevent *event;
+	struct sctp_tsnmap *tsnmap;
+
+	tsnmap = &ulpq->asoc->peer.tsn_map;
+
+	while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
+		freed += skb_headlen(skb);
+		event = sctp_skb2event(skb);
+		tsn = event->sndrcvinfo.sinfo_tsn;
+
+		sctp_ulpevent_free(event);
+		sctp_tsnmap_renege(tsnmap, tsn);
+		if (freed >= needed)
+			return freed;
+	}
+
+	return freed;
+}
+
+/* Renege 'needed' bytes from the reassembly queue.  */
+static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
+{
+	__u16 freed = 0;
+	__u32 tsn;
+	struct sk_buff *skb;
+	struct sctp_ulpevent *event;
+	struct sctp_tsnmap *tsnmap;
+
+	tsnmap = &ulpq->asoc->peer.tsn_map;
+
+	/* Walk backwards through the list, reneges the newest tsns.  */
+	while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
+		freed += skb_headlen(skb);
+		event = sctp_skb2event(skb);
+		tsn = event->sndrcvinfo.sinfo_tsn;
+
+		sctp_ulpevent_free(event);
+		sctp_tsnmap_renege(tsnmap, tsn);
+		if (freed >= needed)
+			return freed;
+	}
+
+	return freed;
+}
+
 /* Partial deliver the first message as there is pressure on rwnd.  */
 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 				struct sctp_chunk *chunk, int priority)
 {
 	struct sctp_ulpevent *event;
+	struct sctp_association *asoc;
+
+	asoc = ulpq->asoc;
 
 	/* Are we already in partial delivery mode?  */
-	if (!sctp_sk(ulpq->asoc->base.sk)->pd_mode) {
+	if (!sctp_sk(asoc->base.sk)->pd_mode) {
 
 		/* Is partial delivery possible?  */
 		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
 		if (event) {
 			sctp_ulpq_tail_event(ulpq, event);
-			sctp_sk(ulpq->asoc->base.sk)->pd_mode = 1;
+			sctp_sk(asoc->base.sk)->pd_mode = 1;
 			ulpq->pd_mode = 1;
 			return;
 		}
 	}
-
-	/* Assert: Either already in partial delivery mode or partial
-	 * delivery wasn't possible, so now the only recourse is
-	 * to renege.  FIXME: Add renege support starts here.
-	 */
 }
+
+/* Renege some packets to make room for an incoming chunk.  */
+void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+		      int priority)
+{
+	struct sctp_association *asoc;
+	__u16 needed, freed;
+
+	asoc = ulpq->asoc;
+
+	if (chunk) {
+		needed = ntohs(chunk->chunk_hdr->length);
+		needed -= sizeof(sctp_data_chunk_t);
+	} else
+		needed = SCTP_DEFAULT_MAXWINDOW;
+
+	freed = 0;
+
+	if (skb_queue_empty(&asoc->base.sk->receive_queue)) {
+		freed = sctp_ulpq_renege_order(ulpq, needed);
+		if (freed < needed) {
+			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
+		}
+	}
+
+	/* If able to free enough room, accept this chunk.  */
+	if (chunk && (freed >= needed)) {
+		__u32 tsn;
+		tsn = ntohl(chunk->subh.data_hdr->tsn);
+		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+		sctp_ulpq_tail_data(ulpq, chunk, priority);
+
+		sctp_ulpq_partial_delivery(ulpq, chunk, priority);
+	}
+
+	return;
+}
 
 /* Notify the application if an association is aborted and in
  * partial delivery mode.  Send up any pending received messages.
  */
......
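
The sizing in sctp_ulpq_renege() above works in payload bytes: the chunk's wire length minus the fixed DATA chunk header, or a large default when no chunk is at hand. A small sketch of that arithmetic (the 16-byte header and the 65535 default are the RFC 2960 DATA header size and the usual SCTP_DEFAULT_MAXWINDOW value, assumed here rather than taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define DATA_CHUNK_HDR 16	/* chunk header + TSN, stream id/seq, ppid */
#define DEFAULT_MAXWINDOW 65535	/* stand-in for SCTP_DEFAULT_MAXWINDOW */

/* Buffer space a DATA chunk's payload will occupy once queued. */
static uint16_t bytes_needed(uint16_t chunk_length)
{
	return chunk_length ? chunk_length - DATA_CHUNK_HDR
			    : DEFAULT_MAXWINDOW;
}

int main(void)
{
	/* A 1016-byte DATA chunk carries 1000 bytes of user payload. */
	printf("need %u bytes\n", (unsigned)bytes_needed(1016));
	/* No chunk at hand: renege aggressively. */
	printf("need %u bytes\n", (unsigned)bytes_needed(0));
	return 0;
}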