Commit 92b9ccd3 authored by Rafał Miłecki's avatar Rafał Miłecki Committed by David S. Miller

bgmac: pass received packet to the netif instead of copying it

Copying whole packets with skb_copy_from_linear_data_offset is a pretty
bad idea. CPU was spending time in __copy_user_common and network
performance was lower. With the new solution iperf-measured speed
increased from 116Mb/s to 134Mb/s.
Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3af390e2
...@@ -315,7 +315,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, ...@@ -315,7 +315,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
struct device *dma_dev = bgmac->core->dma_dev; struct device *dma_dev = bgmac->core->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start]; struct bgmac_slot_info *slot = &ring->slots[ring->start];
struct sk_buff *skb = slot->skb; struct sk_buff *skb = slot->skb;
struct sk_buff *new_skb;
struct bgmac_rx_header *rx; struct bgmac_rx_header *rx;
u16 len, flags; u16 len, flags;
...@@ -328,38 +327,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, ...@@ -328,38 +327,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
len = le16_to_cpu(rx->len); len = le16_to_cpu(rx->len);
flags = le16_to_cpu(rx->flags); flags = le16_to_cpu(rx->flags);
/* Check for poison and drop or pass the packet */ do {
if (len == 0xdead && flags == 0xbeef) { dma_addr_t old_dma_addr = slot->dma_addr;
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", int err;
ring->start);
} else { /* Check for poison and drop or pass the packet */
if (len == 0xdead && flags == 0xbeef) {
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
dma_sync_single_for_device(dma_dev,
slot->dma_addr,
BGMAC_RX_BUF_SIZE,
DMA_FROM_DEVICE);
break;
}
/* Omit CRC. */ /* Omit CRC. */
len -= ETH_FCS_LEN; len -= ETH_FCS_LEN;
new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len); /* Prepare new skb as replacement */
if (new_skb) { err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
skb_put(new_skb, len); if (err) {
skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET, /* Poison the old skb */
new_skb->data, rx->len = cpu_to_le16(0xdead);
len); rx->flags = cpu_to_le16(0xbeef);
skb_checksum_none_assert(skb);
new_skb->protocol = dma_sync_single_for_device(dma_dev,
eth_type_trans(new_skb, bgmac->net_dev); slot->dma_addr,
netif_receive_skb(new_skb); BGMAC_RX_BUF_SIZE,
handled++; DMA_FROM_DEVICE);
} else { break;
bgmac->net_dev->stats.rx_dropped++;
bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
} }
bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
/* Poison the old skb */ /* Unmap old skb, we'll pass it to the netif */
rx->len = cpu_to_le16(0xdead); dma_unmap_single(dma_dev, old_dma_addr,
rx->flags = cpu_to_le16(0xbeef); BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
}
skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
/* Make it back accessible to the hardware */ skb_checksum_none_assert(skb);
dma_sync_single_for_device(dma_dev, slot->dma_addr, skb->protocol = eth_type_trans(skb, bgmac->net_dev);
BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); netif_receive_skb(skb);
handled++;
} while (0);
if (++ring->start >= BGMAC_RX_RING_SLOTS) if (++ring->start >= BGMAC_RX_RING_SLOTS)
ring->start = 0; ring->start = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment