Commit 4cc5c475 authored by Don Fry, committed by David S. Miller

pcnet32: add missing check for pci_dma_mapping_error

The pci_map_single calls were never checked for error. Add error checking
for each call, as requested last year.  Tested on the 79c972 chip.
Signed-off-by: Don Fry <pcnet32@frontier.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 60e2e8b3
...@@ -565,8 +565,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, ...@@ -565,8 +565,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
if (!new_dma_addr_list) if (!new_dma_addr_list)
goto free_new_rx_ring; goto free_new_rx_ring;
new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
GFP_ATOMIC);
if (!new_skb_list) if (!new_skb_list)
goto free_new_lists; goto free_new_lists;
...@@ -593,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, ...@@ -593,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list[new] = new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data, pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(lp->pci_dev,
new_dma_addr_list[new])) {
netif_err(lp, drv, dev, "%s dma mapping failed\n",
__func__);
dev_kfree_skb(new_skb_list[new]);
goto free_all_new;
}
new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
new_rx_ring[new].status = cpu_to_le16(0x8000); new_rx_ring[new].status = cpu_to_le16(0x8000);
...@@ -600,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, ...@@ -600,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* and free any unneeded buffers */ /* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) { for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) { if (lp->rx_skbuff[new]) {
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], if (!pci_dma_mapping_error(lp->pci_dev,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); lp->rx_dma_addr[new]))
pci_unmap_single(lp->pci_dev,
lp->rx_dma_addr[new],
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[new]); dev_kfree_skb(lp->rx_skbuff[new]);
} }
} }
...@@ -625,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, ...@@ -625,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
free_all_new: free_all_new:
while (--new >= lp->rx_ring_size) { while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) { if (new_skb_list[new]) {
pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], if (!pci_dma_mapping_error(lp->pci_dev,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); new_dma_addr_list[new]))
pci_unmap_single(lp->pci_dev,
new_dma_addr_list[new],
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(new_skb_list[new]); dev_kfree_skb(new_skb_list[new]);
} }
} }
...@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev) ...@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
lp->rx_ring[i].status = 0; /* CPU owns buffer */ lp->rx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */ wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) { if (lp->rx_skbuff[i]) {
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], if (!pci_dma_mapping_error(lp->pci_dev,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); lp->rx_dma_addr[i]))
pci_unmap_single(lp->pci_dev,
lp->rx_dma_addr[i],
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]); dev_kfree_skb_any(lp->rx_skbuff[i]);
} }
lp->rx_skbuff[i] = NULL; lp->rx_skbuff[i] = NULL;
...@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) ...@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->tx_dma_addr[x] = lp->tx_dma_addr[x] =
pci_map_single(lp->pci_dev, skb->data, skb->len, pci_map_single(lp->pci_dev, skb->data, skb->len,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
netif_printk(lp, hw, KERN_DEBUG, dev,
"DMA mapping error at line: %d!\n",
__LINE__);
goto clean_up;
}
lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
wmb(); /* Make sure owner changes after all others are visible */ wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[x].status = cpu_to_le16(status); lp->tx_ring[x].status = cpu_to_le16(status);
...@@ -1142,10 +1166,25 @@ static void pcnet32_rx_entry(struct net_device *dev, ...@@ -1142,10 +1166,25 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) { if (pkt_len > rx_copybreak) {
struct sk_buff *newskb; struct sk_buff *newskb;
dma_addr_t new_dma_addr;
newskb = netdev_alloc_skb(dev, PKT_BUF_SKB); newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
/*
* map the new buffer, if mapping fails, drop the packet and
* reuse the old buffer
*/
if (newskb) { if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN); skb_reserve(newskb, NET_IP_ALIGN);
new_dma_addr = pci_map_single(lp->pci_dev,
newskb->data,
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
netif_err(lp, rx_err, dev,
"DMA mapping error.\n");
dev_kfree_skb(newskb);
skb = NULL;
} else {
skb = lp->rx_skbuff[entry]; skb = lp->rx_skbuff[entry];
pci_unmap_single(lp->pci_dev, pci_unmap_single(lp->pci_dev,
lp->rx_dma_addr[entry], lp->rx_dma_addr[entry],
...@@ -1153,13 +1192,10 @@ static void pcnet32_rx_entry(struct net_device *dev, ...@@ -1153,13 +1192,10 @@ static void pcnet32_rx_entry(struct net_device *dev,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
skb_put(skb, pkt_len); skb_put(skb, pkt_len);
lp->rx_skbuff[entry] = newskb; lp->rx_skbuff[entry] = newskb;
lp->rx_dma_addr[entry] = lp->rx_dma_addr[entry] = new_dma_addr;
pci_map_single(lp->pci_dev, rxp->base = cpu_to_le32(new_dma_addr);
newskb->data,
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
rx_in_place = 1; rx_in_place = 1;
}
} else } else
skb = NULL; skb = NULL;
} else } else
...@@ -2229,7 +2265,10 @@ static void pcnet32_purge_tx_ring(struct net_device *dev) ...@@ -2229,7 +2265,10 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
lp->tx_ring[i].status = 0; /* CPU owns buffer */ lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */ wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) { if (lp->tx_skbuff[i]) {
pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], if (!pci_dma_mapping_error(lp->pci_dev,
lp->tx_dma_addr[i]))
pci_unmap_single(lp->pci_dev,
lp->tx_dma_addr[i],
lp->tx_skbuff[i]->len, lp->tx_skbuff[i]->len,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->tx_skbuff[i]); dev_kfree_skb_any(lp->tx_skbuff[i]);
...@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev) ...@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
} }
rmb(); rmb();
if (lp->rx_dma_addr[i] == 0) if (lp->rx_dma_addr[i] == 0) {
lp->rx_dma_addr[i] = lp->rx_dma_addr[i] =
pci_map_single(lp->pci_dev, rx_skbuff->data, pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(lp->pci_dev,
lp->rx_dma_addr[i])) {
/* there is not much we can do at this point */
netif_err(lp, drv, dev,
"%s pci dma mapping error\n",
__func__);
return -1;
}
}
lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
wmb(); /* Make sure owner changes after all others are visible */ wmb(); /* Make sure owner changes after all others are visible */
...@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, ...@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_ring[entry].misc = 0x00000000; lp->tx_ring[entry].misc = 0x00000000;
lp->tx_skbuff[entry] = skb;
lp->tx_dma_addr[entry] = lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
goto drop_packet;
}
lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
wmb(); /* Make sure owner changes after all others are visible */ wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[entry].status = cpu_to_le16(status); lp->tx_ring[entry].status = cpu_to_le16(status);
...@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, ...@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_full = 1; lp->tx_full = 1;
netif_stop_queue(dev); netif_stop_queue(dev);
} }
drop_packet:
spin_unlock_irqrestore(&lp->lock, flags); spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment