Commit a21bb8ba authored by Francois Romieu and committed by David S. Miller

via-rhine: allocate and map receive buffer in a single transaction

It's used to initialize the receive ring but it will actually shine when
the receive poll code is reworked.
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e45af497
...@@ -1213,12 +1213,47 @@ static void free_ring(struct net_device* dev) ...@@ -1213,12 +1213,47 @@ static void free_ring(struct net_device* dev)
} }
/*
 * Pairs a freshly allocated receive sk_buff with its streaming DMA
 * mapping, so allocation and mapping can be performed (and handed to
 * the Rx ring) as a single unit.
 */
struct rhine_skb_dma {
	struct sk_buff *skb;	/* receive buffer */
	dma_addr_t dma;		/* DMA address of skb->data */
};
static inline int rhine_skb_dma_init(struct net_device *dev,
struct rhine_skb_dma *sd)
{ {
struct rhine_private *rp = netdev_priv(dev); struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent; struct device *hwdev = dev->dev.parent;
const int size = rp->rx_buf_sz;
sd->skb = netdev_alloc_skb(dev, size);
if (!sd->skb)
return -ENOMEM;
sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
dev_kfree_skb_any(sd->skb);
return -EIO;
}
return 0;
}
/*
 * Record a mapped receive buffer in the driver's bookkeeping arrays and
 * write its DMA address into Rx descriptor @entry.
 *
 * The dma_wmb() orders the descriptor address write before any later
 * descriptor update by the caller (e.g. setting DescOwn in rx_status,
 * which hands the descriptor to the NIC).
 */
static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
					   struct rhine_skb_dma *sd, int entry)
{
	rp->rx_skbuff_dma[entry] = sd->dma;
	rp->rx_skbuff[entry] = sd->skb;

	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
	dma_wmb();
}
static void alloc_rbufs(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
dma_addr_t next; dma_addr_t next;
int i; int rc, i;
rp->dirty_rx = rp->cur_rx = 0; rp->dirty_rx = rp->cur_rx = 0;
...@@ -1239,20 +1274,14 @@ static void alloc_rbufs(struct net_device *dev) ...@@ -1239,20 +1274,14 @@ static void alloc_rbufs(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) { for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz); struct rhine_skb_dma sd;
rp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
rp->rx_skbuff_dma[i] = rc = rhine_skb_dma_init(dev, &sd);
dma_map_single(hwdev, skb->data, rp->rx_buf_sz, if (rc < 0)
DMA_FROM_DEVICE);
if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
rp->rx_skbuff_dma[i] = 0;
dev_kfree_skb(skb);
break; break;
}
rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); rhine_skb_dma_nic_store(rp, &sd, i);
rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
} }
rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment