Commit e688822d authored by Alexander Kochetkov, committed by David S. Miller

net: arc_emac: fix arc_emac_rx() error paths

arc_emac_rx() has some issues found by code review.

In case of netdev_alloc_skb_ip_align() or dma_map_single() failure,
the rx fifo entry will not be returned to EMAC.

In case of dma_map_single() failure, the previously allocated skb is
lost to the driver; at the same time, the address of the newly
allocated skb is never provided to EMAC.
Signed-off-by: Alexander Kochetkov <al.kochet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7352e252
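
The key change is the ordering of the receive path: a replacement skb is allocated and DMA-mapped first, and the received frame is handed to the stack only once both steps have succeeded; every failure path re-arms the buffer descriptor. A condensed sketch of the fixed per-packet flow (identifiers as in the diff below; the surrounding ring-walk loop, rate-limited logging, and declarations are omitted):

        /* Try to obtain and map a replacement buffer before touching
         * the received one.
         */
        skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
        if (unlikely(!skb)) {
                /* No replacement: recycle the old buffer back to the
                 * EMAC so the RX fifo keeps no hole; frame is dropped.
                 */
                rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                stats->rx_errors++;
                stats->rx_dropped++;
                continue;
        }

        addr = dma_map_single(&ndev->dev, (void *)skb->data,
                              EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(&ndev->dev, addr)) {
                dev_kfree_skb(skb);     /* free the new skb, keep the old one */
                rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                stats->rx_errors++;
                stats->rx_dropped++;
                continue;
        }

        /* Both steps succeeded: deliver the old skb, install the new one. */
        dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
                         dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
        netif_receive_skb(rx_buff->skb);
        rx_buff->skb = skb;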
@@ -210,39 +210,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 			continue;
 		}
 
-		pktlen = info & LEN_MASK;
-		stats->rx_packets++;
-		stats->rx_bytes += pktlen;
-		skb = rx_buff->skb;
-		skb_put(skb, pktlen);
-		skb->dev = ndev;
-		skb->protocol = eth_type_trans(skb, ndev);
-
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
-				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
-		/* Prepare the BD for next cycle */
-		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
-							 EMAC_BUFFER_SIZE);
-		if (unlikely(!rx_buff->skb)) {
+		/* Prepare the BD for next cycle. netif_receive_skb()
+		 * only if new skb was allocated and mapped to avoid holes
+		 * in the RX fifo.
+		 */
+		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+		if (unlikely(!skb)) {
+			if (net_ratelimit())
+				netdev_err(ndev, "cannot allocate skb\n");
+			/* Return ownership to EMAC */
+			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 			stats->rx_errors++;
-			/* Because receive_skb is below, increment rx_dropped */
 			stats->rx_dropped++;
 			continue;
 		}
 
-		/* receive_skb only if new skb was allocated to avoid holes */
-		netif_receive_skb(skb);
-
-		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+		addr = dma_map_single(&ndev->dev, (void *)skb->data,
 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
 		if (dma_mapping_error(&ndev->dev, addr)) {
 			if (net_ratelimit())
-				netdev_err(ndev, "cannot dma map\n");
-			dev_kfree_skb(rx_buff->skb);
+				netdev_err(ndev, "cannot map dma buffer\n");
+			dev_kfree_skb(skb);
+			/* Return ownership to EMAC */
+			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 			stats->rx_errors++;
+			stats->rx_dropped++;
 			continue;
 		}
+
+		/* unmap previously mapped skb */
+		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+		pktlen = info & LEN_MASK;
+		stats->rx_packets++;
+		stats->rx_bytes += pktlen;
+		skb_put(rx_buff->skb, pktlen);
+		rx_buff->skb->dev = ndev;
+		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+		netif_receive_skb(rx_buff->skb);
+
+		rx_buff->skb = skb;
 		dma_unmap_addr_set(rx_buff, addr, addr);
 		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
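
Note the resulting invariant: a received skb is passed to netif_receive_skb() only after its replacement has been successfully allocated and mapped, so every descriptor in the RX ring always owns a valid buffer. Both failure paths now return ownership to the EMAC by rewriting rxbd->info with FOR_EMAC, and the mapping-failure path frees only the unusable new skb while the old one stays installed in the ring, so rx_buff->skb is never left pointing at freed memory.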