Commit 66d06757 authored by Petri Gynther, committed by David S. Miller

net: bcmgenet: simplify __bcmgenet_tx_reclaim()

1. Use c_index and ring->c_index to determine how many TxCBs/TxBDs are
   ready for cleanup
   - c_index = the current value of TDMA_CONS_INDEX
   - TDMA_CONS_INDEX is HW-incremented and auto-wraparound (0x0-0xFFFF)
   - ring->c_index = __bcmgenet_tx_reclaim() cleaned up to this point on
     the previous invocation

2. Add bcmgenet_tx_ring->clean_ptr
   - index of the next TxCB to be cleaned
   - incremented as TxCBs/TxBDs are processed
   - value always in range [ring->cb_ptr, ring->end_ptr]
     (see the clean_ptr walk sketch after the diff below)

3. Fix incrementing of dev->stats.tx_packets
   - should be incremented only when tx_cb_ptr->skb != NULL

These changes simplify __bcmgenet_tx_reclaim(). Furthermore, Tx ring size
can now be any value.

With the old code, Tx ring size had to be a power-of-2, because
(num_tx_bds - 1) is an all-ones bit mask only for power-of-2 sizes
(a sketch contrasting the old and new arithmetic follows the commit
metadata below):
   num_tx_bds = ring->size;
   c_index &= (num_tx_bds - 1);
   last_c_index &= (num_tx_bds - 1);
Signed-off-by: Petri Gynther <pgynther@google.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f93eb4ba
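
To make the index arithmetic concrete, here is a small standalone
sketch. It is not driver code: the ring size of 48 and the sample index
values are hypothetical, and only DMA_C_INDEX_MASK (0xFFFF, matching the
TDMA_CONS_INDEX wraparound described above) comes from the driver. It
contrasts the old power-of-2-only reclaim count with the new 16-bit
counter arithmetic:

   #include <stdio.h>

   #define DMA_C_INDEX_MASK 0xFFFF /* TDMA_CONS_INDEX wraps at 0xFFFF */

   int main(void)
   {
           unsigned int num_tx_bds = 48; /* hypothetical non-power-of-2 ring */
           unsigned int hw_c_index = 50; /* HW has consumed 50 TxBDs in total */
           unsigned int sw_c_index = 46; /* SW reclaimed up to 46 last time */

           /* Old arithmetic: correct only when num_tx_bds is a power of 2,
            * because (num_tx_bds - 1) is then an all-ones bit mask.
            * Here 50 & 47 = 34, which corrupts the count. */
           unsigned int c = hw_c_index & (num_tx_bds - 1);
           unsigned int last = sw_c_index & (num_tx_bds - 1);
           unsigned int last_tx_cn = (c >= last) ? c - last
                                                 : num_tx_bds - last + c;
           printf("old: %u TxBDs to reclaim (expected 4)\n", last_tx_cn);

           /* New arithmetic: keep the raw 16-bit counter and subtract
            * modulo 0x10000, so any ring size works. */
           unsigned int c_index = hw_c_index & DMA_C_INDEX_MASK;
           unsigned int txbds_ready = (c_index >= sw_c_index) ?
                   c_index - sw_c_index :
                   (DMA_C_INDEX_MASK + 1) - sw_c_index + c_index;
           printf("new: %u TxBDs to reclaim\n", txbds_ready);

           return 0;
   }

Run as written, the old computation yields 36 instead of the correct 4,
which is why the old code required power-of-2 ring sizes; the new
computation yields 4 for any ring size.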
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -978,39 +978,32 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned int pkts_compl = 0;
-	unsigned int bds_compl;
 	unsigned int c_index;
+	unsigned int txbds_ready;
+	unsigned int txbds_processed = 0;
 
 	/* Compute how many buffers are transmitted since last xmit call */
 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
-	txq = netdev_get_tx_queue(dev, ring->queue);
-
-	last_c_index = ring->c_index;
-	num_tx_bds = ring->size;
-
-	c_index &= (num_tx_bds - 1);
+	c_index &= DMA_C_INDEX_MASK;
 
-	if (c_index >= last_c_index)
-		last_tx_cn = c_index - last_c_index;
+	if (likely(c_index >= ring->c_index))
+		txbds_ready = c_index - ring->c_index;
 	else
-		last_tx_cn = num_tx_bds - last_c_index + c_index;
+		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
 
 	netif_dbg(priv, tx_done, dev,
-		  "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-		  __func__, ring->index,
-		  c_index, last_tx_cn, last_c_index);
+		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
 
 	/* Reclaim transmitted buffers */
-	while (last_tx_cn-- > 0) {
-		tx_cb_ptr = ring->cbs + last_c_index;
-		bds_compl = 0;
+	while (txbds_processed < txbds_ready) {
+		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
 		if (tx_cb_ptr->skb) {
 			pkts_compl++;
-			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -1026,20 +1019,23 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					 DMA_TO_DEVICE);
 			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
 		}
-		dev->stats.tx_packets++;
-		ring->free_bds += bds_compl;
 
-		last_c_index++;
-		last_c_index &= (num_tx_bds - 1);
+		txbds_processed++;
+		if (likely(ring->clean_ptr < ring->end_ptr))
+			ring->clean_ptr++;
+		else
+			ring->clean_ptr = ring->cb_ptr;
 	}
 
+	ring->free_bds += txbds_processed;
+	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+
 	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		txq = netdev_get_tx_queue(dev, ring->queue);
 		if (netif_tx_queue_stopped(txq))
 			netif_tx_wake_queue(txq);
 	}
 
-	ring->c_index = c_index;
-
 	return pkts_compl;
 }
@@ -1734,6 +1730,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 	}
 	ring->cbs = priv->tx_cbs + start_ptr;
 	ring->size = size;
+	ring->clean_ptr = start_ptr;
 	ring->c_index = 0;
 	ring->free_bds = size;
 	ring->write_ptr = start_ptr;
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -525,6 +525,7 @@ struct bcmgenet_tx_ring {
 	unsigned int	queue;		/* queue index */
 	struct enet_cb	*cbs;		/* tx ring buffer control block*/
 	unsigned int	size;		/* size of each tx ring */
+	unsigned int	clean_ptr;	/* Tx ring clean pointer */
 	unsigned int	c_index;	/* last consumer index of each ring*/
 	unsigned int	free_bds;	/* # of free bds for each ring */
 	unsigned int	write_ptr;	/* Tx ring write pointer SW copy */
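
Finally, a minimal sketch of the new clean_ptr walk referenced in point 2
of the commit message. It is standalone, not driver code: the cb_ptr and
end_ptr values are hypothetical stand-ins for a ring whose TxCBs occupy
slots 32..47 of priv->tx_cbs. It shows clean_ptr stepping through the
ring's own TxCB slice and wrapping from end_ptr back to cb_ptr,
independent of the free-running 16-bit consumer index:

   #include <stdio.h>

   int main(void)
   {
           unsigned int cb_ptr = 32;    /* first TxCB index of this ring */
           unsigned int end_ptr = 47;   /* last TxCB index of this ring */
           unsigned int clean_ptr = 45; /* next TxCB to be cleaned */
           unsigned int txbds_ready = 5;

           /* Mirrors the reclaim loop: cleans 45, 46, 47, then wraps
            * back to 32 and 33, staying within [cb_ptr, end_ptr]. */
           for (unsigned int i = 0; i < txbds_ready; i++) {
                   printf("cleaning TxCB %u\n", clean_ptr);
                   if (clean_ptr < end_ptr)
                           clean_ptr++;
                   else
                           clean_ptr = cb_ptr;
           }

           return 0;
   }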