Commit b4c9784c authored by Niklas Cassel, committed by David S. Miller

net: stmmac: WARN if tx_skbuff entries are reused before cleared

The current code assumes that a tx_skbuff entry has been cleared
by stmmac_tx_clean() before stmmac_xmit()/stmmac_tso_xmit()
assigns a new skb to that entry. However, since we never check
the current value before overwriting it, it is theoretically
possible that a non-NULL value is overwritten.

Add WARN_ONs to verify that each entry in tx_skbuff is NULL
before it is assigned a new value.
Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f66b533d
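
For context, the following is a minimal, self-contained userspace sketch (not the driver's actual code; fake_skb, tx_queue, xmit_one and tx_clean are illustrative names, and WARN_ON is a stand-in macro for the kernel's) of the ring invariant the new checks assert: the transmit (producer) path may only claim slots whose skb pointer the cleanup (consumer) path has already set back to NULL.

#include <stdio.h>
#include <stddef.h>

#define RING_SIZE 8
/* Stand-in for the kernel's WARN_ON(): report, don't abort. */
#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "WARN_ON hit at %s:%d\n", __FILE__, __LINE__); } while (0)

struct fake_skb { int len; };

struct tx_queue {
	struct fake_skb *tx_skbuff[RING_SIZE];	/* NULL == slot free */
	unsigned int cur_tx;			/* producer index */
	unsigned int dirty_tx;			/* consumer index */
};

/* Producer: claim the next slot; it must already be empty. */
static void xmit_one(struct tx_queue *q, struct fake_skb *skb)
{
	unsigned int entry = q->cur_tx;

	WARN_ON(q->tx_skbuff[entry]);		/* the check this patch adds */
	q->tx_skbuff[entry] = skb;
	q->cur_tx = (entry + 1) % RING_SIZE;
}

/* Consumer: release completed slots and clear their skb pointers. */
static void tx_clean(struct tx_queue *q)
{
	while (q->dirty_tx != q->cur_tx) {
		unsigned int entry = q->dirty_tx;

		q->tx_skbuff[entry] = NULL;	/* makes the slot reusable */
		q->dirty_tx = (entry + 1) % RING_SIZE;
	}
}

int main(void)
{
	struct tx_queue q = { 0 };
	struct fake_skb skb = { .len = 64 };

	xmit_one(&q, &skb);	/* slot 0 claimed */
	tx_clean(&q);		/* slot 0 cleared: reuse is now safe */
	xmit_one(&q, &skb);	/* no warning */
	return 0;
}
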
@@ -2794,6 +2794,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
 	while (tmp_len > 0) {
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 		desc = tx_q->dma_tx + tx_q->cur_tx;
 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
@@ -2878,6 +2879,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		priv->hw->desc->set_mss(mss_desc, mss);
 		tx_q->mss = mss;
 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 	}
 	if (netif_msg_tx_queued(priv)) {
@@ -2888,6 +2890,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	first_entry = tx_q->cur_tx;
+	WARN_ON(tx_q->tx_skbuff[first_entry]);
 	desc = tx_q->dma_tx + first_entry;
 	first = desc;
@@ -3062,6 +3065,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	entry = tx_q->cur_tx;
 	first_entry = entry;
+	WARN_ON(tx_q->tx_skbuff[first_entry]);
 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
@@ -3090,6 +3094,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		bool last_segment = (i == (nfrags - 1));
 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+		WARN_ON(tx_q->tx_skbuff[entry]);
 		if (likely(priv->extend_desc))
 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);