Commit 364a97f5 authored by Roman Yeryomin, committed by David S. Miller

net: korina: optimize rx descriptor flags processing

Signed-off-by: Roman Yeryomin <roman@advem.lv>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7ce103b4
......@@ -363,59 +363,60 @@ static int korina_rx(struct net_device *dev, int limit)
if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
break;
/* Update statistics counters */
if (devcs & ETH_RX_CRC)
dev->stats.rx_crc_errors++;
if (devcs & ETH_RX_LOR)
dev->stats.rx_length_errors++;
if (devcs & ETH_RX_LE)
dev->stats.rx_length_errors++;
if (devcs & ETH_RX_OVR)
dev->stats.rx_fifo_errors++;
if (devcs & ETH_RX_CV)
dev->stats.rx_frame_errors++;
if (devcs & ETH_RX_CES)
dev->stats.rx_length_errors++;
if (devcs & ETH_RX_MP)
dev->stats.multicast++;
if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
/* check that this is a whole packet
* WARNING: DMA_FD bit incorrectly set
* in Rc32434 (errata ref #077) */
/* check that this is a whole packet
* WARNING: DMA_FD bit incorrectly set
* in Rc32434 (errata ref #077) */
if (!(devcs & ETH_RX_LD))
goto next;
if (!(devcs & ETH_RX_ROK)) {
/* Update statistics counters */
dev->stats.rx_errors++;
dev->stats.rx_dropped++;
} else if ((devcs & ETH_RX_ROK)) {
pkt_len = RCVPKT_LENGTH(devcs);
if (devcs & ETH_RX_CRC)
dev->stats.rx_crc_errors++;
if (devcs & ETH_RX_LE)
dev->stats.rx_length_errors++;
if (devcs & ETH_RX_OVR)
dev->stats.rx_fifo_errors++;
if (devcs & ETH_RX_CV)
dev->stats.rx_frame_errors++;
if (devcs & ETH_RX_CES)
dev->stats.rx_frame_errors++;
goto next;
}
/* must be the (first and) last
* descriptor then */
pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
pkt_len = RCVPKT_LENGTH(devcs);
/* invalidate the cache */
dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
/* must be the (first and) last
* descriptor then */
pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
/* Malloc up new buffer. */
skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
/* invalidate the cache */
dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
if (!skb_new)
break;
/* Do not count the CRC */
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, dev);
/* Malloc up new buffer. */
skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
/* Pass the packet to upper layers */
netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
if (!skb_new)
break;
/* Do not count the CRC */
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, dev);
/* Update the mcast stats */
if (devcs & ETH_RX_MP)
dev->stats.multicast++;
/* Pass the packet to upper layers */
netif_receive_skb(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
lp->rx_skb[lp->rx_next_done] = skb_new;
}
/* Update the mcast stats */
if (devcs & ETH_RX_MP)
dev->stats.multicast++;
lp->rx_skb[lp->rx_next_done] = skb_new;
next:
rd->devcs = 0;
/* Restore descriptor's curr_addr */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment