Commit e8d8c5d8 authored by Jakub Kicinski

bnxt: make sure xmit_more + errors does not miss doorbells

skbs are freed on error and not put on the ring. We may, however,
be in a situation where we're freeing the last skb of a batch,
and there is a doorbell ring pending because of xmit_more() being
true earlier. Make sure we ring the doorbell in such situations.

Since errors are rare, don't pay attention to xmit_more() and just
always flush the pending frames.
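
To illustrate the invariant, here is a minimal user-space sketch of
the pattern being enforced. The names (struct tx_ring, ring_doorbell(),
xmit_one()) are illustrative stand-ins, not the driver's API: any path
that stops submitting work, including an error path that drops the
frame, must flush a doorbell deferred by an earlier xmit_more.

#include <stdbool.h>
#include <stdio.h>

struct tx_ring {
	unsigned int prod;	/* producer index */
	bool kick_pending;	/* doorbell deferred by xmit_more */
};

static void ring_doorbell(struct tx_ring *txr)
{
	printf("doorbell: prod=%u\n", txr->prod);
	txr->kick_pending = false;
}

/* Returns false when the "frame" is dropped on an error. */
static bool xmit_one(struct tx_ring *txr, bool ok, bool xmit_more)
{
	if (!ok) {
		/* Error path: this frame is dropped, but a doorbell
		 * deferred by a previous frame must still be rung, or
		 * the HW never sees the rest of the batch. */
		if (txr->kick_pending)
			ring_doorbell(txr);
		return false;
	}

	txr->prod++;				/* descriptor queued */
	if (xmit_more)
		txr->kick_pending = true;	/* defer the doorbell */
	else
		ring_doorbell(txr);		/* end of batch: kick now */
	return true;
}

int main(void)
{
	struct tx_ring txr = { 0 };

	xmit_one(&txr, true, true);	/* queued, doorbell deferred */
	xmit_one(&txr, false, true);	/* error: must flush the kick */
	return 0;
}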

The busy case should be safe to leave alone because it can
only happen if start_xmit races with completions and they
both enable the queue. In that case the kick can't be pending.
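
For context, a hedged sketch of the stop/re-check idiom behind
bnxt_txr_netif_try_stop_queue(), with C11 atomics standing in for the
kernel's memory barriers (simplified, not the driver's exact code).
Because the queue is only re-enabled when a racing completion freed
descriptors, a genuine NETDEV_TX_BUSY return means nothing was queued
on this call and no new doorbell was deferred:

#include <stdatomic.h>
#include <stdbool.h>

#define TX_WAKE_THRESH 32

struct txq_state {
	atomic_bool stopped;
	atomic_int avail;	/* free descriptors */
};

static bool try_stop_queue(struct txq_state *q)
{
	atomic_store(&q->stopped, true);
	/* Re-check after stopping; the seq_cst atomics order the store
	 * against the load, standing in for the kernel's smp_mb(). */
	if (atomic_load(&q->avail) > TX_WAKE_THRESH) {
		atomic_store(&q->stopped, false);
		return false;	/* room appeared: don't report BUSY */
	}
	return true;		/* genuinely full: NETDEV_TX_BUSY */
}

int main(void)
{
	struct txq_state q = { .stopped = false, .avail = 64 };

	return try_stop_queue(&q) ? 1 : 0;
}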

Noticed while reading the code.

Fixes: 4d172f21 ("bnxt_en: Implement xmit_more.")
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 01cca6b9
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -72,7 +72,8 @@
 #include "bnxt_debugfs.h"
 
 #define BNXT_TX_TIMEOUT		(5 * HZ)
-#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)
+#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
+				 NETIF_MSG_TX_ERR)
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -365,6 +366,13 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
 	return md_dst->u.port_info.port_id;
 }
 
+static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+			     u16 prod)
+{
+	bnxt_db_write(bp, &txr->tx_db, prod);
+	txr->kick_pending = 0;
+}
+
 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
 					  struct bnxt_tx_ring_info *txr,
 					  struct netdev_queue *txq)
@@ -413,6 +421,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	free_size = bnxt_tx_avail(bp, txr);
 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+		/* We must have raced with NAPI cleanup */
+		if (net_ratelimit() && txr->kick_pending)
+			netif_warn(bp, tx_err, dev,
+				   "bnxt: ring busy w/ flush pending!\n");
 		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
 			return NETDEV_TX_BUSY;
 	}
@@ -537,21 +549,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 normal_tx:
 	if (length < BNXT_MIN_PKT_SIZE) {
 		pad = BNXT_MIN_PKT_SIZE - length;
-		if (skb_pad(skb, pad)) {
+		if (skb_pad(skb, pad))
 			/* SKB already freed. */
-			tx_buf->skb = NULL;
-			return NETDEV_TX_OK;
-		}
+			goto tx_kick_pending;
 		length = BNXT_MIN_PKT_SIZE;
 	}
 
 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
 
-	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
-		dev_kfree_skb_any(skb);
-		tx_buf->skb = NULL;
-		return NETDEV_TX_OK;
-	}
+	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+		goto tx_free;
 
 	dma_unmap_addr_set(tx_buf, mapping, mapping);
 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -638,13 +645,15 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txr->tx_prod = prod;
 
 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
-		bnxt_db_write(bp, &txr->tx_db, prod);
+		bnxt_txr_db_kick(bp, txr, prod);
+	else
+		txr->kick_pending = 1;
 
 tx_done:
 
 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
 		if (netdev_xmit_more() && !tx_buf->is_push)
-			bnxt_db_write(bp, &txr->tx_db, prod);
+			bnxt_txr_db_kick(bp, txr, prod);
 
 		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
 	}
@@ -659,7 +668,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* start back at beginning and unmap skb */
 	prod = txr->tx_prod;
 	tx_buf = &txr->tx_buf_ring[prod];
-	tx_buf->skb = NULL;
 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
 			 skb_headlen(skb), PCI_DMA_TODEVICE);
 	prod = NEXT_TX(prod);
@@ -673,7 +681,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				   PCI_DMA_TODEVICE);
 	}
 
+tx_free:
 	dev_kfree_skb_any(skb);
+tx_kick_pending:
+	if (txr->kick_pending)
+		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
+	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -786,6 +786,7 @@ struct bnxt_tx_ring_info {
 	u16			tx_prod;
 	u16			tx_cons;
 	u16			txq_index;
+	u8			kick_pending;
 	struct bnxt_db_info	tx_db;
 
 	struct tx_bd		*tx_desc_ring[MAX_TX_PAGES];