Commit 484d802d authored by Florian Fainelli, committed by David S. Miller

net: systemport: Rewrite __bcm_sysport_tx_reclaim()

There is no need for complex checking between the last consumed index
and the current consumed index; a simple subtraction will do.

This also eliminates the possibility of a permanent transmit queue stall
under the following conditions:

- one CPU bursts ring->size worth of traffic (up to 256 buffers), to the
  point where we run out of free descriptors, so we stop the transmit
  queue at the end of bcm_sysport_xmit()

- because of our locking, the transmit process disables interrupts,
  which means we can be blocking the TX reclamation process

- when TX reclamation finally runs, we will be computing the difference
  between ring->c_index (last consumed index by SW) and what the HW
  reports through its register

- this register is masked with (ring->size - 1) = 0xff, which will lead
  to stripping the upper bits of the index (the register is 16 bits wide)

- we will compute last_tx_cn as 0, which means there is no work to be
  done, and we never wake up the transmit queue, leaving it
  permanently disabled

A practical example: ring->c_index aka last_c_index = 12; we pushed 256
entries, so the HW consumer index = 268; masking it with 0xff gives 12,
so last_tx_cn == 0 and nothing happens.
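
To make the arithmetic concrete, below is a minimal standalone sketch
(illustrative only, not the driver code itself) reproducing both
computations with the numbers above; it assumes RING_CONS_INDEX_MASK is
0xffff, consistent with the 16-bit register noted earlier:

#include <stdio.h>

int main(void)
{
	unsigned int last_c_index = 12;		/* ring->c_index: last index consumed by SW */
	unsigned int hw_c_index = 268;		/* consumer index reported by HW (12 + 256) */
	unsigned int num_tx_cbs = 256;		/* ring->size */
	unsigned int cons_mask = 0xffff;	/* RING_CONS_INDEX_MASK (assumed): 16-bit field */
	unsigned int c_index, last_tx_cn, txbds_ready;

	/* Old computation: mask the HW index down to the ring size first */
	c_index = hw_c_index & (num_tx_cbs - 1);	/* 268 & 0xff = 12 */
	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;
	printf("old: last_tx_cn = %u\n", last_tx_cn);	/* 0 -> no reclaim, queue stalls */

	/* New computation: subtract first, then mask with the full field width */
	txbds_ready = (hw_c_index - last_c_index) & cons_mask;
	printf("new: txbds_ready = %u\n", txbds_ready);	/* 256 -> all buffers reclaimed */

	return 0;
}

The subtraction is wrap-safe: (c_index - ring->c_index) & RING_CONS_INDEX_MASK
yields the distance between the two indices modulo 2^16, which is correct
as long as fewer than 65536 descriptors are outstanding (the ring holds
at most 256).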

Fixes: 80105bef ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver")
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2cc683e8
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 					     struct bcm_sysport_tx_ring *ring)
 {
-	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 	struct net_device *ndev = priv->netdev;
+	unsigned int txbds_processed = 0;
 	struct bcm_sysport_cb *cb;
+	unsigned int txbds_ready;
+	unsigned int c_index;
 	u32 hw_ind;
 
 	/* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 	/* Compute how many descriptors have been processed since last call */
 	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
 	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
-	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
-	last_c_index = ring->c_index;
-	num_tx_cbs = ring->size;
-
-	c_index &= (num_tx_cbs - 1);
-
-	if (c_index >= last_c_index)
-		last_tx_cn = c_index - last_c_index;
-	else
-		last_tx_cn = num_tx_cbs - last_c_index + c_index;
+	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
 
 	netif_dbg(priv, tx_done, ndev,
-		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
-		  ring->index, c_index, last_tx_cn, last_c_index);
+		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+		  ring->index, ring->c_index, c_index, txbds_ready);
 
-	while (last_tx_cn-- > 0) {
-		cb = ring->cbs + last_c_index;
+	while (txbds_processed < txbds_ready) {
+		cb = &ring->cbs[ring->clean_index];
 		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
 
 		ring->desc_count++;
-		last_c_index++;
-		last_c_index &= (num_tx_cbs - 1);
+		txbds_processed++;
+
+		if (likely(ring->clean_index < ring->size - 1))
+			ring->clean_index++;
+		else
+			ring->clean_index = 0;
 	}
 
 	u64_stats_update_begin(&priv->syncp);
@@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
 	ring->index = index;
 	ring->size = size;
+	ring->clean_index = 0;
 	ring->alloc_size = ring->size;
 	ring->desc_cpu = p;
 	ring->desc_count = ring->size;
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
 	unsigned int desc_count;	/* Number of descriptors */
 	unsigned int curr_desc;		/* Current descriptor */
 	unsigned int c_index;		/* Last consumer index */
-	unsigned int p_index;		/* Current producer index */
+	unsigned int clean_index;	/* Current clean index */
 	struct bcm_sysport_cb *cbs;	/* Transmit control blocks */
 	struct dma_desc *desc_cpu;	/* CPU view of the descriptor */
 	struct bcm_sysport_priv *priv;	/* private context backpointer */