Commit ab52120b authored by Jon Grimm's avatar Jon Grimm

[SCTP] Fix out_qlen (pending data) count.

Missed a few places on integration of TX delay code.  Also removed
the 1/2 cwnd check so we behave more like pure Nagle.
parent acbabcbe
...@@ -158,7 +158,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, ...@@ -158,7 +158,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
/* Append a chunk to the offered packet reporting back any inability to do /* Append a chunk to the offered packet reporting back any inability to do
* so. * so.
*/ */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
struct sctp_chunk *chunk) struct sctp_chunk *chunk)
{ {
sctp_xmit_t retval = SCTP_XMIT_OK; sctp_xmit_t retval = SCTP_XMIT_OK;
...@@ -180,7 +180,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, ...@@ -180,7 +180,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
/* Both control chunks and data chunks with TSNs are /* Both control chunks and data chunks with TSNs are
* non-fragmentable. * non-fragmentable.
*/ */
int fragmentable = sctp_chunk_is_data(chunk) && int fragmentable = sctp_chunk_is_data(chunk) &&
(!chunk->has_tsn); (!chunk->has_tsn);
if (packet_empty) { if (packet_empty) {
if (fragmentable) { if (fragmentable) {
...@@ -470,15 +470,16 @@ static void sctp_packet_reset(struct sctp_packet *packet) ...@@ -470,15 +470,16 @@ static void sctp_packet_reset(struct sctp_packet *packet)
} }
/* This private function handles the specifics of appending DATA chunks. */ /* This private function handles the specifics of appending DATA chunks. */
static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
struct sctp_chunk *chunk) struct sctp_chunk *chunk)
{ {
sctp_xmit_t retval = SCTP_XMIT_OK; sctp_xmit_t retval = SCTP_XMIT_OK;
size_t datasize, rwnd, inflight; size_t datasize, rwnd, inflight;
struct sctp_transport *transport = packet->transport; struct sctp_transport *transport = packet->transport;
__u32 max_burst_bytes; __u32 max_burst_bytes;
struct sctp_opt *sp = sctp_sk(transport->asoc->base.sk); struct sctp_association *asoc = transport->asoc;
struct sctp_outq *q = &transport->asoc->outqueue; struct sctp_opt *sp = sctp_sk(asoc->base.sk);
struct sctp_outq *q = &asoc->outqueue;
/* RFC 2960 6.1 Transmission of DATA Chunks /* RFC 2960 6.1 Transmission of DATA Chunks
* *
...@@ -493,8 +494,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, ...@@ -493,8 +494,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* receiver to the data sender. * receiver to the data sender.
*/ */
rwnd = transport->asoc->peer.rwnd; rwnd = asoc->peer.rwnd;
inflight = transport->asoc->outqueue.outstanding_bytes; inflight = asoc->outqueue.outstanding_bytes;
datasize = sctp_data_size(chunk); datasize = sctp_data_size(chunk);
...@@ -516,7 +517,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, ...@@ -516,7 +517,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* if ((flightsize + Max.Burst * MTU) < cwnd) * if ((flightsize + Max.Burst * MTU) < cwnd)
* cwnd = flightsize + Max.Burst * MTU * cwnd = flightsize + Max.Burst * MTU
*/ */
max_burst_bytes = transport->asoc->max_burst * transport->asoc->pmtu; max_burst_bytes = asoc->max_burst * asoc->pmtu;
if ((transport->flight_size + max_burst_bytes) < transport->cwnd) { if ((transport->flight_size + max_burst_bytes) < transport->cwnd) {
transport->cwnd = transport->flight_size + max_burst_bytes; transport->cwnd = transport->flight_size + max_burst_bytes;
SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: " SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
...@@ -549,26 +550,20 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, ...@@ -549,26 +550,20 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
} }
/* Nagle's algorithm to solve small-packet problem: /* Nagle's algorithm to solve small-packet problem:
* inhibit the sending of new chunks when new outgoing data arrives * Inhibit the sending of new chunks when new outgoing data arrives
* if any proeviously transmitted data on the connection remains * if any previously transmitted data on the connection remains
* unacknowledged. Unless the connection was previously idle. Check * unacknowledged.
* whether the connection is idle. No outstanding means idle, flush
* it. If outstanding bytes are less than half cwnd, the connection
* is not in the state of congestion, so also flush it.
*/ */
if (!sp->nodelay && q->outstanding_bytes >= transport->cwnd >> 1) { if (!sp->nodelay && q->outstanding_bytes) {
/* Check whether this chunk and all the rest of pending unsigned len = datasize + q->out_qlen;
* data will fit or whether we'll choose to delay in /* Check whether this chunk and all the rest of pending
* hopes of bundling a full sized packet. * data will fit or delay in hopes of bundling a full
* sized packet.
*/ */
if ((datasize + q->out_qlen) < transport->asoc->frag_point) {
if (len < asoc->pmtu - SCTP_IP_OVERHEAD) {
/* If the the chunk should be delay
* for future sending, we could not
* append it.
*/
retval = SCTP_XMIT_NAGLE_DELAY; retval = SCTP_XMIT_NAGLE_DELAY;
goto finish; goto finish;
} }
} }
...@@ -576,16 +571,15 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, ...@@ -576,16 +571,15 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
transport->flight_size += datasize; transport->flight_size += datasize;
/* Keep track of how many bytes are in flight to the receiver. */ /* Keep track of how many bytes are in flight to the receiver. */
transport->asoc->outqueue.outstanding_bytes += datasize; asoc->outqueue.outstanding_bytes += datasize;
/* Update our view of the receiver's rwnd. */ /* Update our view of the receiver's rwnd. */
if (datasize < rwnd) { if (datasize < rwnd)
rwnd -= datasize; rwnd -= datasize;
} else { else
rwnd = 0; rwnd = 0;
}
transport->asoc->peer.rwnd = rwnd; asoc->peer.rwnd = rwnd;
finish: finish:
return retval; return retval;
......
...@@ -89,6 +89,16 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q, ...@@ -89,6 +89,16 @@ static inline void sctp_outq_tail_data(struct sctp_outq *q,
return; return;
} }
/* Insert a chunk behind chunk 'pos'. */
/* Link chunk 'ch' into the outqueue list immediately in front of 'pos'
 * (i.e. it will be transmitted before 'pos'), and account its payload
 * bytes in q->out_qlen so the pending-data count stays accurate.
 * SCTP chunks are laid over sk_buffs, hence the casts; 'pos->list' is
 * the sk_buff_head that 'pos' currently lives on.
 */
static inline void sctp_outq_insert_data(struct sctp_outq *q,
struct sctp_chunk *ch,
struct sctp_chunk *pos)
{
__skb_insert((struct sk_buff *)ch, (struct sk_buff *)pos->prev,
(struct sk_buff *)pos, pos->list);
/* out_qlen tracks the total bytes of data pending in the outqueue;
 * per the commit message, missing this update was the out_qlen bug
 * being fixed here (it feeds the Nagle "full packet pending" check).
 */
q->out_qlen += ch->skb->len;
}
/* Generate a new outqueue. */ /* Generate a new outqueue. */
struct sctp_outq *sctp_outq_new(sctp_association_t *asoc) struct sctp_outq *sctp_outq_new(sctp_association_t *asoc)
{ {
...@@ -478,8 +488,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, ...@@ -478,8 +488,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
* queue. 'pos' points to the next chunk in the output queue after the * queue. 'pos' points to the next chunk in the output queue after the
* chunk that is currently in the process of fragmentation. * chunk that is currently in the process of fragmentation.
*/ */
void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos, void sctp_xmit_frag(struct sctp_outq *q, struct sctp_chunk *pos,
struct sctp_packet *packet, sctp_chunk_t *frag, __u32 tsn) struct sctp_packet *packet, struct sctp_chunk *frag, __u32 tsn)
{ {
struct sctp_transport *transport = packet->transport; struct sctp_transport *transport = packet->transport;
struct sk_buff_head *queue = &q->out; struct sk_buff_head *queue = &q->out;
...@@ -497,11 +507,10 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos, ...@@ -497,11 +507,10 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
SCTP_DEBUG_PRINTK("sctp_xmit_frag: q not empty. " SCTP_DEBUG_PRINTK("sctp_xmit_frag: q not empty. "
"adding 0x%x to outqueue\n", "adding 0x%x to outqueue\n",
ntohl(frag->subh.data_hdr->tsn)); ntohl(frag->subh.data_hdr->tsn));
if (pos) if (pos)
__skb_insert((struct sk_buff *)frag, pos->prev, sctp_outq_insert_data(q, frag, pos);
pos, pos->list);
else else
__skb_queue_tail(queue, (struct sk_buff *) frag); sctp_outq_tail_data(q, frag);
return; return;
} }
...@@ -514,10 +523,9 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos, ...@@ -514,10 +523,9 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
"adding 0x%x to outqueue\n", "adding 0x%x to outqueue\n",
ntohl(frag->subh.data_hdr->tsn)); ntohl(frag->subh.data_hdr->tsn));
if (pos) if (pos)
__skb_insert((struct sk_buff *)frag, pos->prev, sctp_outq_insert_data(q, frag, pos);
pos, pos->list);
else else
__skb_queue_tail(queue, (struct sk_buff *)frag); sctp_outq_tail_data(q, frag);
break; break;
case SCTP_XMIT_OK: case SCTP_XMIT_OK:
...@@ -530,10 +538,9 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos, ...@@ -530,10 +538,9 @@ void sctp_xmit_frag(struct sctp_outq *q, struct sk_buff *pos,
"failed. adding 0x%x to outqueue\n", "failed. adding 0x%x to outqueue\n",
ntohl(frag->subh.data_hdr->tsn)); ntohl(frag->subh.data_hdr->tsn));
if (pos) if (pos)
__skb_insert((struct sk_buff *)frag, pos->prev, sctp_outq_insert_data(q, frag, pos);
pos, pos->list);
else else
__skb_queue_tail(queue,(struct sk_buff *)frag); sctp_outq_tail_data(q, frag);
} else { } else {
SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output " SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
"success. 0x%x sent\n", "success. 0x%x sent\n",
...@@ -561,7 +568,7 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt, ...@@ -561,7 +568,7 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt,
struct list_head *lfrag, *frag_list; struct list_head *lfrag, *frag_list;
__u32 tsn; __u32 tsn;
int nfrags = 1; int nfrags = 1;
struct sk_buff *pos; struct sctp_chunk *pos;
/* Count the number of fragments. */ /* Count the number of fragments. */
frag_list = &frag->frag_list; frag_list = &frag->frag_list;
...@@ -572,7 +579,7 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt, ...@@ -572,7 +579,7 @@ void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt,
/* Get a TSN block of nfrags TSNs. */ /* Get a TSN block of nfrags TSNs. */
tsn = sctp_association_get_tsn_block(asoc, nfrags); tsn = sctp_association_get_tsn_block(asoc, nfrags);
pos = skb_peek(&q->out); pos = (struct sctp_chunk *)skb_peek(&q->out);
/* Transmit the first fragment. */ /* Transmit the first fragment. */
sctp_xmit_frag(q, pos, pkt, frag, tsn++); sctp_xmit_frag(q, pos, pkt, frag, tsn++);
...@@ -735,7 +742,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) ...@@ -735,7 +742,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
} }
queue = &q->control; queue = &q->control;
while (NULL != (chunk = (sctp_chunk_t *)skb_dequeue(queue))) { while ((chunk = (sctp_chunk_t *)skb_dequeue(queue))) {
/* Pick the right transport to use. */ /* Pick the right transport to use. */
new_transport = chunk->transport; new_transport = chunk->transport;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment