Commit 71791dc8 authored by Andre Przywara, committed by David S. Miller

net: axienet: Check for DMA mapping errors

Especially with the default 32-bit DMA mask, DMA buffers are a limited
resource, so their allocation can fail.
So as the DMA API documentation requires, add error checking code after
dma_map_single() calls to catch the case where we run out of "low" memory.
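
For reference, the check the DMA API documentation asks for follows roughly this
pattern (a minimal sketch, not code from this driver; "dev", "buf" and "len" are
placeholder names):

	/* needs <linux/dma-mapping.h> */
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* mapping failed, e.g. no IOVA space or bounce buffer left */
		return -ENOMEM;
	}
	/* ... hand "addr" to the hardware, later undo the mapping ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);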
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ab365c33
@@ -248,6 +248,11 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 						     skb->data,
 						     lp->max_frm_size,
 						     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+			netdev_err(ndev, "DMA mapping error\n");
+			goto out;
+		}
 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 	}
@@ -679,6 +684,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	dma_addr_t tail_p;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
+	u32 orig_tail_ptr = lp->tx_bd_tail;

 	num_frag = skb_shinfo(skb)->nr_frags;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -714,9 +720,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 	}

-	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+		if (net_ratelimit())
+			netdev_err(ndev, "TX DMA mapping error\n");
+		ndev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

 	for (ii = 0; ii < num_frag; ii++) {
 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -727,6 +739,16 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					     skb_frag_address(frag),
 					     skb_frag_size(frag),
 					     DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+			if (net_ratelimit())
+				netdev_err(ndev, "TX DMA mapping error\n");
+			ndev->stats.tx_dropped++;
+			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+					      NULL);
+			lp->tx_bd_tail = orig_tail_ptr;
+			return NETDEV_TX_OK;
+		}
 		cur_p->cntrl = skb_frag_size(frag);
 	}
@@ -807,6 +829,13 @@ static void axienet_recv(struct net_device *ndev)
 		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
 					     lp->max_frm_size,
 					     DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+			if (net_ratelimit())
+				netdev_err(ndev, "RX DMA mapping error\n");
+			dev_kfree_skb(new_skb);
+			return;
+		}
 		cur_p->cntrl = lp->max_frm_size;
 		cur_p->status = 0;
 		cur_p->skb = new_skb;