Commit ab52120b authored by Jon Grimm

[SCTP] Fix out_qlen (pending data) count.

Missed a few places when integrating the TX delay code.  Also removed
the 1/2 cwnd check so we behave more like pure Nagle.
parent acbabcbe
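
The second half of the message refers to the Nagle-style transmit check in sctp_packet_append_data below: with the 1/2 cwnd test gone, new data is held back only when unacknowledged data is already outstanding and the chunk plus the bytes already queued would not fill a packet. The stand-alone sketch below restates that decision in plain C; it is not the kernel code, and payload_limit merely stands in for the pmtu-minus-overhead bound used in the diff.

/* Minimal sketch of the pure-Nagle decision this commit moves to.
 * Not kernel code: sizes are plain integers and payload_limit is an
 * illustrative stand-in for (pmtu - SCTP_IP_OVERHEAD).
 */
#include <stdbool.h>
#include <stdio.h>

/* Return true if a newly arrived chunk should be held back in the hope
 * of bundling a full-sized packet.
 */
static bool nagle_delay(bool nodelay, unsigned outstanding_bytes,
                        unsigned datasize, unsigned out_qlen,
                        unsigned payload_limit)
{
        if (nodelay)                    /* SCTP_NODELAY set: never delay  */
                return false;
        if (!outstanding_bytes)         /* connection idle: flush at once */
                return false;
        /* Delay only while chunk + already-queued data is sub-packet-sized. */
        return (datasize + out_qlen) < payload_limit;
}

int main(void)
{
        /* 1452 ~ a 1500-byte MTU minus IP/SCTP overhead (illustrative). */
        printf("%d\n", nagle_delay(false, 1000, 100, 200, 1452));  /* 1: delay */
        printf("%d\n", nagle_delay(false, 0, 100, 200, 1452));     /* 0: idle  */
        printf("%d\n", nagle_delay(false, 1000, 800, 700, 1452));  /* 0: full  */
        return 0;
}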
@@ -477,8 +477,9 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
         size_t datasize, rwnd, inflight;
         struct sctp_transport *transport = packet->transport;
         __u32 max_burst_bytes;
-        struct sctp_opt *sp = sctp_sk(transport->asoc->base.sk);
-        struct sctp_outq *q = &transport->asoc->outqueue;
+        struct sctp_association *asoc = transport->asoc;
+        struct sctp_opt *sp = sctp_sk(asoc->base.sk);
+        struct sctp_outq *q = &asoc->outqueue;
 
         /* RFC 2960 6.1 Transmission of DATA Chunks
          *
@@ -493,8 +494,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
          * receiver to the data sender.
          */
 
-        rwnd = transport->asoc->peer.rwnd;
-        inflight = transport->asoc->outqueue.outstanding_bytes;
+        rwnd = asoc->peer.rwnd;
+        inflight = asoc->outqueue.outstanding_bytes;
 
         datasize = sctp_data_size(chunk);
@@ -516,7 +517,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
          * if ((flightsize + Max.Burst * MTU) < cwnd)
          *    cwnd = flightsize + Max.Burst * MTU
          */
-        max_burst_bytes = transport->asoc->max_burst * transport->asoc->pmtu;
+        max_burst_bytes = asoc->max_burst * asoc->pmtu;
         if ((transport->flight_size + max_burst_bytes) < transport->cwnd) {
                 transport->cwnd = transport->flight_size + max_burst_bytes;
                 SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
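
An aside on the hunk above: the rule quoted from RFC 2960 caps the burst after an idle period by clamping cwnd to flightsize + Max.Burst * MTU. A rough stand-alone illustration with made-up numbers, not kernel code:

/* Stand-alone illustration of the max-burst clamp quoted in the hunk
 * above.  Plain unsigned arithmetic with hypothetical values.
 */
#include <stdio.h>

static unsigned apply_max_burst(unsigned cwnd, unsigned flight_size,
                                unsigned max_burst, unsigned pmtu)
{
        unsigned max_burst_bytes = max_burst * pmtu;

        /* if ((flightsize + Max.Burst * MTU) < cwnd)
         *         cwnd = flightsize + Max.Burst * MTU
         */
        if (flight_size + max_burst_bytes < cwnd)
                cwnd = flight_size + max_burst_bytes;
        return cwnd;
}

int main(void)
{
        /* e.g. Max.Burst = 4, PMTU = 1500: an idle sender with a large
         * cwnd is limited to 0 + 4 * 1500 = 6000 bytes for this burst.
         */
        printf("%u\n", apply_max_burst(20000, 0, 4, 1500));     /* 6000  */
        printf("%u\n", apply_max_burst(20000, 18000, 4, 1500)); /* 20000 */
        return 0;
}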
@@ -549,24 +550,18 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
         }
 
         /* Nagle's algorithm to solve small-packet problem:
-         * inhibit the sending of new chunks when new outgoing data arrives
-         * if any proeviously transmitted data on the connection remains
-         * unacknowledged. Unless the connection was previously idle. Check
-         * whether the connection is idle. No outstanding means idle, flush
-         * it. If outstanding bytes are less than half cwnd, the connection
-         * is not in the state of congestion, so also flush it.
+         * Inhibit the sending of new chunks when new outgoing data arrives
+         * if any previously transmitted data on the connection remains
+         * unacknowledged.
          */
-        if (!sp->nodelay && q->outstanding_bytes >= transport->cwnd >> 1) {
+        if (!sp->nodelay && q->outstanding_bytes) {
+                unsigned len = datasize + q->out_qlen;
+
                 /* Check whether this chunk and all the rest of pending
-                 * data will fit or whether we'll choose to delay in
-                 * hopes of bundling a full sized packet.
+                 * data will fit or delay in hopes of bundling a full
+                 * sized packet.
                  */
-                if ((datasize + q->out_qlen) < transport->asoc->frag_point) {
-                        /* If the the chunk should be delay
-                         * for future sending, we could not
-                         * append it.
-                         */
+                if (len < asoc->pmtu - SCTP_IP_OVERHEAD) {
                         retval = SCTP_XMIT_NAGLE_DELAY;
                         goto finish;
                 }
@@ -576,16 +571,15 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
         transport->flight_size += datasize;
 
         /* Keep track of how many bytes are in flight to the receiver. */
-        transport->asoc->outqueue.outstanding_bytes += datasize;
+        asoc->outqueue.outstanding_bytes += datasize;
 
         /* Update our view of the receiver's rwnd. */
-        if (datasize < rwnd) {
+        if (datasize < rwnd)
                 rwnd -= datasize;
-        } else {
+        else
                 rwnd = 0;
-        }
 
-        transport->asoc->peer.rwnd = rwnd;
+        asoc->peer.rwnd = rwnd;
 
 finish:
         return retval;
......
@@ -89,6 +89,16 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q,
         return;
 }
 
+/* Insert a chunk behind chunk 'pos'. */
+static inline void sctp_outq_insert_data(struct sctp_outq *q,
+                                         struct sctp_chunk *ch,
+                                         struct sctp_chunk *pos)
+{
+        __skb_insert((struct sk_buff *)ch, (struct sk_buff *)pos->prev,
+                     (struct sk_buff *)pos, pos->list);
+        q->out_qlen += ch->skb->len;
+}
+
 /* Generate a new outqueue. */
 struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
 {
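
The new sctp_outq_insert_data() helper exists so that the insert path bumps out_qlen the same way sctp_outq_tail_data() does; the "missed places" from the commit message are the call sites below, which previously linked the skb in directly and left the pending-byte counter stale. A toy model of that invariant follows. The names are illustrative only, the insert direction is simplified, and the dequeue-side decrement is assumed here to round out the picture rather than taken from this diff.

/* Toy model (not kernel code) of the bookkeeping the new helper enforces:
 * every path that links a chunk into the queue must also add its length
 * to the pending-byte counter, and the dequeue side must subtract it.
 * toy_chunk, toy_q, and friends are made-up names.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_chunk {
        struct toy_chunk *next;
        size_t len;
};

struct toy_q {
        struct toy_chunk *head, *tail;
        size_t qlen;                    /* analogue of sctp_outq.out_qlen */
};

/* Append at the tail (analogue of sctp_outq_tail_data). */
static void toy_q_tail(struct toy_q *q, struct toy_chunk *ch)
{
        ch->next = NULL;
        if (q->tail)
                q->tail->next = ch;
        else
                q->head = ch;
        q->tail = ch;
        q->qlen += ch->len;             /* count on this enqueue path ... */
}

/* Insert 'ch' right after 'pos'.  Loose analogue of sctp_outq_insert_data
 * (which inserts before 'pos'); the point is the counter update, not the
 * link direction.
 */
static void toy_q_insert_after(struct toy_q *q, struct toy_chunk *ch,
                               struct toy_chunk *pos)
{
        ch->next = pos->next;
        pos->next = ch;
        if (q->tail == pos)
                q->tail = ch;
        q->qlen += ch->len;             /* ... and on this one as well */
}

/* Remove from the head; the counter must come back down symmetrically. */
static struct toy_chunk *toy_q_dequeue(struct toy_q *q)
{
        struct toy_chunk *ch = q->head;

        if (!ch)
                return NULL;
        q->head = ch->next;
        if (!q->head)
                q->tail = NULL;
        q->qlen -= ch->len;
        return ch;
}

int main(void)
{
        struct toy_q q = { NULL, NULL, 0 };
        struct toy_chunk a = { NULL, 100 }, b = { NULL, 300 }, c = { NULL, 50 };

        toy_q_tail(&q, &a);
        toy_q_tail(&q, &b);
        toy_q_insert_after(&q, &c, &a);          /* pending: a, c, b */
        printf("pending %zu bytes\n", q.qlen);   /* 450 */
        toy_q_dequeue(&q);
        printf("pending %zu bytes\n", q.qlen);   /* 350 */
        return 0;
}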
@@ -478,8 +488,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
  * queue. 'pos' points to the next chunk in the output queue after the
  * chunk that is currently in the process of fragmentation.
  */
-void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
-                    struct sctp_packet *packet, sctp_chunk_t *frag, __u32 tsn)
+void sctp_xmit_frag(struct sctp_outq *q, struct sctp_chunk *pos,
+                    struct sctp_packet *packet, struct sctp_chunk *frag, __u32 tsn)
 {
         struct sctp_transport *transport = packet->transport;
         struct sk_buff_head *queue = &q->out;
@@ -498,10 +508,9 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
                                   "adding 0x%x to outqueue\n",
                                   ntohl(frag->subh.data_hdr->tsn));
                 if (pos)
-                        __skb_insert((struct sk_buff *)frag, pos->prev,
-                                     pos, pos->list);
+                        sctp_outq_insert_data(q, frag, pos);
                 else
-                        __skb_queue_tail(queue, (struct sk_buff *) frag);
+                        sctp_outq_tail_data(q, frag);
                 return;
         }
@@ -514,10 +523,9 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
                                   "adding 0x%x to outqueue\n",
                                   ntohl(frag->subh.data_hdr->tsn));
                 if (pos)
-                        __skb_insert((struct sk_buff *)frag, pos->prev,
-                                     pos, pos->list);
+                        sctp_outq_insert_data(q, frag, pos);
                 else
-                        __skb_queue_tail(queue, (struct sk_buff *)frag);
+                        sctp_outq_tail_data(q, frag);
                 break;
 
         case SCTP_XMIT_OK:
@@ -530,10 +538,9 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
                                           "failed. adding 0x%x to outqueue\n",
                                           ntohl(frag->subh.data_hdr->tsn));
                         if (pos)
-                                __skb_insert((struct sk_buff *)frag, pos->prev,
-                                             pos, pos->list);
+                                sctp_outq_insert_data(q, frag, pos);
                         else
-                                __skb_queue_tail(queue, (struct sk_buff *)frag);
+                                sctp_outq_tail_data(q, frag);
                 } else {
                         SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
                                           "success. 0x%x sent\n",
@@ -561,7 +568,7 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt,
         struct list_head *lfrag, *frag_list;
         __u32 tsn;
         int nfrags = 1;
-        struct sk_buff *pos;
+        struct sctp_chunk *pos;
 
         /* Count the number of fragments. */
         frag_list = &frag->frag_list;
@@ -572,7 +579,7 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt,
         /* Get a TSN block of nfrags TSNs. */
         tsn = sctp_association_get_tsn_block(asoc, nfrags);
 
-        pos = skb_peek(&q->out);
+        pos = (struct sctp_chunk *)skb_peek(&q->out);
 
         /* Transmit the first fragment. */
         sctp_xmit_frag(q, pos, pkt, frag, tsn++);
@@ -735,7 +742,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
         }
 
         queue = &q->control;
-        while (NULL != (chunk = (sctp_chunk_t *)skb_dequeue(queue))) {
+        while ((chunk = (sctp_chunk_t *)skb_dequeue(queue))) {
                 /* Pick the right transport to use. */
                 new_transport = chunk->transport;
......