Commit 99ab08e0 authored by Lennert Buytenhek, committed by Lennert Buytenhek

mv643xx_eth: replace array of skbs awaiting transmit completion with a queue

Get rid of the skb pointer array that we currently use for transmit
reclaim, and replace it with an skb queue, to which skbuffs are appended
when they are passed to the xmit function, and removed from the front
and freed when we do transmit queue reclaim and hit a descriptor with
the 'owned by device' bit clear and 'last descriptor' bit set.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
parent a418950c
...@@ -336,7 +336,8 @@ struct tx_queue { ...@@ -336,7 +336,8 @@ struct tx_queue {
struct tx_desc *tx_desc_area; struct tx_desc *tx_desc_area;
dma_addr_t tx_desc_dma; dma_addr_t tx_desc_dma;
int tx_desc_area_size; int tx_desc_area_size;
struct sk_buff **tx_skb;
struct sk_buff_head tx_skb;
unsigned long tx_packets; unsigned long tx_packets;
unsigned long tx_bytes; unsigned long tx_bytes;
...@@ -676,10 +677,8 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) ...@@ -676,10 +677,8 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->cmd_sts = BUFFER_OWNED_BY_DMA | desc->cmd_sts = BUFFER_OWNED_BY_DMA |
ZERO_PADDING | TX_LAST_DESC | ZERO_PADDING | TX_LAST_DESC |
TX_ENABLE_INTERRUPT; TX_ENABLE_INTERRUPT;
txq->tx_skb[tx_index] = skb;
} else { } else {
desc->cmd_sts = BUFFER_OWNED_BY_DMA; desc->cmd_sts = BUFFER_OWNED_BY_DMA;
txq->tx_skb[tx_index] = NULL;
} }
desc->l4i_chk = 0; desc->l4i_chk = 0;
...@@ -712,13 +711,10 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) ...@@ -712,13 +711,10 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
if (nr_frags) { if (nr_frags) {
txq_submit_frag_skb(txq, skb); txq_submit_frag_skb(txq, skb);
length = skb_headlen(skb); length = skb_headlen(skb);
txq->tx_skb[tx_index] = NULL;
} else { } else {
cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
length = skb->len; length = skb->len;
txq->tx_skb[tx_index] = skb;
} }
desc->byte_cnt = length; desc->byte_cnt = length;
...@@ -772,6 +768,8 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) ...@@ -772,6 +768,8 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0; desc->l4i_chk = 0;
} }
__skb_queue_tail(&txq->tx_skb, skb);
/* ensure all other descriptors are written before first cmd_sts */ /* ensure all other descriptors are written before first cmd_sts */
wmb(); wmb();
desc->cmd_sts = cmd_sts; desc->cmd_sts = cmd_sts;
...@@ -884,8 +882,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) ...@@ -884,8 +882,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++; reclaimed++;
txq->tx_desc_count--; txq->tx_desc_count--;
skb = txq->tx_skb[tx_index]; skb = NULL;
txq->tx_skb[tx_index] = NULL; if (cmd_sts & TX_LAST_DESC)
skb = __skb_dequeue(&txq->tx_skb);
if (cmd_sts & ERROR_SUMMARY) { if (cmd_sts & ERROR_SUMMARY) {
dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
...@@ -1692,18 +1691,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) ...@@ -1692,18 +1691,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
if (txq->tx_desc_area == NULL) { if (txq->tx_desc_area == NULL) {
dev_printk(KERN_ERR, &mp->dev->dev, dev_printk(KERN_ERR, &mp->dev->dev,
"can't allocate tx ring (%d bytes)\n", size); "can't allocate tx ring (%d bytes)\n", size);
goto out; return -ENOMEM;
} }
memset(txq->tx_desc_area, 0, size); memset(txq->tx_desc_area, 0, size);
txq->tx_desc_area_size = size; txq->tx_desc_area_size = size;
txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
GFP_KERNEL);
if (txq->tx_skb == NULL) {
dev_printk(KERN_ERR, &mp->dev->dev,
"can't allocate tx skb ring\n");
goto out_free;
}
tx_desc = (struct tx_desc *)txq->tx_desc_area; tx_desc = (struct tx_desc *)txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) { for (i = 0; i < txq->tx_ring_size; i++) {
...@@ -1719,18 +1711,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) ...@@ -1719,18 +1711,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc); nexti * sizeof(struct tx_desc);
} }
return 0; skb_queue_head_init(&txq->tx_skb);
out_free: return 0;
if (index == 0 && size <= mp->tx_desc_sram_size)
iounmap(txq->tx_desc_area);
else
dma_free_coherent(NULL, size,
txq->tx_desc_area,
txq->tx_desc_dma);
out:
return -ENOMEM;
} }
static void txq_deinit(struct tx_queue *txq) static void txq_deinit(struct tx_queue *txq)
...@@ -1748,8 +1731,6 @@ static void txq_deinit(struct tx_queue *txq) ...@@ -1748,8 +1731,6 @@ static void txq_deinit(struct tx_queue *txq)
else else
dma_free_coherent(NULL, txq->tx_desc_area_size, dma_free_coherent(NULL, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma); txq->tx_desc_area, txq->tx_desc_dma);
kfree(txq->tx_skb);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment