Commit 364a97f5 authored by Roman Yeryomin, committed by David S. Miller

net: korina: optimize rx descriptor flags processing

Signed-off-by: Roman Yeryomin <roman@advem.lv>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7ce103b4
...@@ -363,59 +363,60 @@ static int korina_rx(struct net_device *dev, int limit) ...@@ -363,59 +363,60 @@ static int korina_rx(struct net_device *dev, int limit)
if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0) if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
break; break;
/* Update statistics counters */ /* check that this is a whole packet
if (devcs & ETH_RX_CRC) * WARNING: DMA_FD bit incorrectly set
dev->stats.rx_crc_errors++; * in Rc32434 (errata ref #077) */
if (devcs & ETH_RX_LOR) if (!(devcs & ETH_RX_LD))
dev->stats.rx_length_errors++; goto next;
if (devcs & ETH_RX_LE)
dev->stats.rx_length_errors++; if (!(devcs & ETH_RX_ROK)) {
if (devcs & ETH_RX_OVR) /* Update statistics counters */
dev->stats.rx_fifo_errors++;
if (devcs & ETH_RX_CV)
dev->stats.rx_frame_errors++;
if (devcs & ETH_RX_CES)
dev->stats.rx_length_errors++;
if (devcs & ETH_RX_MP)
dev->stats.multicast++;
if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
/* check that this is a whole packet
* WARNING: DMA_FD bit incorrectly set
* in Rc32434 (errata ref #077) */
dev->stats.rx_errors++; dev->stats.rx_errors++;
dev->stats.rx_dropped++; dev->stats.rx_dropped++;
} else if ((devcs & ETH_RX_ROK)) { if (devcs & ETH_RX_CRC)
pkt_len = RCVPKT_LENGTH(devcs); dev->stats.rx_crc_errors++;
if (devcs & ETH_RX_LE)
dev->stats.rx_length_errors++;
if (devcs & ETH_RX_OVR)
dev->stats.rx_fifo_errors++;
if (devcs & ETH_RX_CV)
dev->stats.rx_frame_errors++;
if (devcs & ETH_RX_CES)
dev->stats.rx_frame_errors++;
goto next;
}
/* must be the (first and) last pkt_len = RCVPKT_LENGTH(devcs);
* descriptor then */
pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
/* invalidate the cache */ /* must be the (first and) last
dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); * descriptor then */
pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
/* Malloc up new buffer. */ /* invalidate the cache */
skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
if (!skb_new) /* Malloc up new buffer. */
break; skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
/* Do not count the CRC */
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, dev);
/* Pass the packet to upper layers */ if (!skb_new)
netif_receive_skb(skb); break;
dev->stats.rx_packets++; /* Do not count the CRC */
dev->stats.rx_bytes += pkt_len; skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, dev);
/* Update the mcast stats */ /* Pass the packet to upper layers */
if (devcs & ETH_RX_MP) netif_receive_skb(skb);
dev->stats.multicast++; dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
lp->rx_skb[lp->rx_next_done] = skb_new; /* Update the mcast stats */
} if (devcs & ETH_RX_MP)
dev->stats.multicast++;
lp->rx_skb[lp->rx_next_done] = skb_new;
next:
rd->devcs = 0; rd->devcs = 0;
/* Restore descriptor's curr_addr */ /* Restore descriptor's curr_addr */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment