Commit 74dd40bc authored by Beniamino Galvani, committed by David S. Miller

net: arc_emac: prevent reuse of unreclaimed tx descriptors

This patch changes the logic in the tx path to ensure that tx descriptors
are reused for transmission only after they have been reclaimed by
arc_emac_tx_clean().

Signed-off-by: Beniamino Galvani <b.galvani@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7ce7679d
@@ -29,6 +29,17 @@
 #define DRV_NAME	"arc_emac"
 #define DRV_VERSION	"1.0"
 
+/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
 /**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:	Pointer to the net_device structure.
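
The helper keeps one slot in reserve: when txbd_curr == txbd_dirty the ring
is empty and the formula yields TX_BD_NUM - 1, while a completely full ring
yields 0, so "full" and "empty" can never be confused. A minimal userspace
sketch (not driver code; the ring size is chosen only for illustration) that
checks this invariant for every index pair:

#include <assert.h>
#include <stdio.h>

#define TX_BD_NUM 8	/* illustrative ring size, any value works */

/* Same arithmetic as arc_emac_tx_avail(), on plain ints. */
static int tx_avail(int txbd_curr, int txbd_dirty)
{
	return (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM;
}

int main(void)
{
	for (int dirty = 0; dirty < TX_BD_NUM; dirty++) {
		for (int curr = 0; curr < TX_BD_NUM; curr++) {
			int in_flight = (curr - dirty + TX_BD_NUM) % TX_BD_NUM;
			int avail = tx_avail(curr, dirty);

			/* One slot stays reserved, so "empty" (curr == dirty)
			 * and "full" (avail == 0) are distinct states. */
			assert(avail == TX_BD_NUM - 1 - in_flight);
			assert(curr != dirty || avail == TX_BD_NUM - 1);
		}
	}
	printf("avail formula consistent for all %d^2 index pairs\n",
	       TX_BD_NUM);
	return 0;
}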
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->info = 0;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-		if (netif_queue_stopped(ndev))
-			netif_wake_queue(ndev);
 	}
+
+	/* Ensure that txbd_dirty is visible to tx() before checking
+	 * for queue stopped.
+	 */
+	smp_mb();
+
+	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+		netif_wake_queue(ndev);
 }
 
 /**
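
This smp_mb() pairs with the one added in arc_emac_tx() below: each side
publishes its index update before reading the other side's state. That is
the store-buffering pattern; without a full barrier on both sides, tx()
could stop the queue based on a stale txbd_dirty while tx_clean()
simultaneously reads a stale queue state and skips the wake-up, leaving the
queue stopped forever. A userspace litmus-style sketch of the pairing,
under the assumption that C11 seq_cst fences are a close enough stand-in
for smp_mb():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Thread A models tx():       publish txbd_curr,  then read txbd_dirty.
 * Thread B models tx_clean(): publish txbd_dirty, then read queue state.
 * If both reads could see the pre-update values, tx() would stop the
 * queue just as tx_clean() decides no wake-up is needed: a lost wake. */
static atomic_int curr_published, dirty_published;
static atomic_int r_tx, r_clean;
static atomic_int go;

static void *tx_side(void *arg)
{
	while (!atomic_load(&go))
		;
	atomic_store_explicit(&curr_published, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	atomic_store(&r_tx, atomic_load_explicit(&dirty_published,
						 memory_order_relaxed));
	return NULL;
}

static void *clean_side(void *arg)
{
	while (!atomic_load(&go))
		;
	atomic_store_explicit(&dirty_published, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	atomic_store(&r_clean, atomic_load_explicit(&curr_published,
						    memory_order_relaxed));
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 20000; i++) {
		pthread_t a, b;

		atomic_store(&curr_published, 0);
		atomic_store(&dirty_published, 0);
		atomic_store(&go, 0);
		pthread_create(&a, NULL, tx_side, NULL);
		pthread_create(&b, NULL, clean_side, NULL);
		atomic_store(&go, 1);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		/* With both fences, at least one side must observe the
		 * other's update; "both stale" would be a lost wake. */
		if (!atomic_load(&r_tx) && !atomic_load(&r_clean)) {
			printf("lost wake-up possible at iteration %d\n", i);
			return 1;
		}
	}
	printf("no lost wake-up observed in 20000 runs\n");
	return 0;
}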
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-	/* EMAC still holds this buffer in its possession.
-	 * CPU must not modify this buffer descriptor
-	 */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+	if (unlikely(!arc_emac_tx_avail(priv))) {
 		netif_stop_queue(ndev);
+		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-	/* Get "info" of the next BD */
-	info = &priv->txbd[*txbd_curr].info;
+	/* Ensure that tx_clean() sees the new txbd_curr before
+	 * checking the queue status. This prevents an unneeded wake
+	 * of the queue in tx_clean().
+	 */
+	smp_mb();
 
-	/* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+	if (!arc_emac_tx_avail(priv)) {
 		netif_stop_queue(ndev);
 
+		/* Refresh tx_dirty */
+		smp_mb();
+
+		if (arc_emac_tx_avail(priv))
+			netif_start_queue(ndev);
+	}
+
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);
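
Taken together, the two barriers plus the stop-and-recheck form the standard
lost-wakeup-free handshake: after stopping the queue, tx() must re-read
txbd_dirty, because tx_clean() may have reclaimed descriptors (and already
checked the queue state) in the window between the availability test and
netif_stop_queue(). A self-contained userspace model of the whole handshake
(names like tx_thread/clean_thread are hypothetical, and polling threads
stand in for the real interrupt-driven path, so this demonstrates the
protocol rather than reproduces the bug):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TX_BD_NUM 8	/* power of two keeps the modular math valid */
#define PACKETS   200000

static atomic_uint txbd_curr, txbd_dirty;	/* free-running indices */
static atomic_bool queue_stopped;

static unsigned tx_avail(void)
{
	return (atomic_load(&txbd_dirty) + TX_BD_NUM -
		atomic_load(&txbd_curr) - 1) % TX_BD_NUM;
}

/* Models arc_emac_tx(): advance txbd_curr, then stop-and-recheck. */
static void *tx_thread(void *arg)
{
	for (unsigned sent = 0; sent < PACKETS; ) {
		if (atomic_load(&queue_stopped) || !tx_avail())
			continue;	/* core won't call xmit: spin */
		atomic_fetch_add(&txbd_curr, 1);
		sent++;
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (!tx_avail()) {
			atomic_store(&queue_stopped, true);
			/* Refresh txbd_dirty: clean may have run between
			 * the check above and the stop. */
			atomic_thread_fence(memory_order_seq_cst);
			if (tx_avail())
				atomic_store(&queue_stopped, false);
		}
	}
	return NULL;
}

/* Models arc_emac_tx_clean(): reclaim, barrier, then conditional wake. */
static void *clean_thread(void *arg)
{
	for (unsigned cleaned = 0; cleaned < PACKETS; ) {
		while (atomic_load(&txbd_dirty) != atomic_load(&txbd_curr)) {
			atomic_fetch_add(&txbd_dirty, 1);
			cleaned++;
		}
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (atomic_load(&queue_stopped) && tx_avail())
			atomic_store(&queue_stopped, false);
	}
	return NULL;
}

int main(void)
{
	pthread_t tx, clean;

	pthread_create(&tx, NULL, tx_thread, NULL);
	pthread_create(&clean, NULL, clean_thread, NULL);
	pthread_join(tx, NULL);
	pthread_join(clean, NULL);
	assert(tx_avail() == TX_BD_NUM - 1);	/* ring fully drained */
	printf("%d packets queued and reclaimed, no stall\n", PACKETS);
	return 0;
}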