Commit 015dac88 authored by Michael Hennerich, committed by David S. Miller

netdev: bfin_mac: fix performance issue found by netperf

- Replace the needlessly long mdelay(1) in the TX polling loop with udelay(10)
- Use proper defines (NET_IP_ALIGN, DMA_RUN) instead of bare magic numbers; see the alignment sketch below
- Remove the broken implementation of the TX DMA Data Alignment (TXDWA) feature
Signed-off-by: Michael Hennerich <michael.hennerich@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 805a8ab3
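
The RX-path changes below replace a bare 2 with NET_IP_ALIGN in dev_alloc_skb() and skb_reserve(). The point of that small reserve is that, after the 14-byte Ethernet header, the IP header lands on a 4-byte boundary. A minimal userspace sketch of the idea, with illustrative values and names (PKT_BUF_SZ here is a stand-in, not the driver's definition):

/*
 * Illustrative sketch only, not driver code: reserving NET_IP_ALIGN
 * (normally 2) bytes at the front of a receive buffer leaves the IP
 * header on a 4-byte boundary once the 14-byte Ethernet header is
 * accounted for.  PKT_BUF_SZ below is a stand-in value.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PKT_BUF_SZ   1560  /* stand-in; the driver has its own definition */
#define NET_IP_ALIGN 2     /* the define behind the bare "2" in the old code */
#define ETH_HLEN     14    /* Ethernet header length */

int main(void)
{
    /* mirrors dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); malloc() memory is at least 4-byte aligned */
    uint8_t *buf = malloc(PKT_BUF_SZ + NET_IP_ALIGN);
    if (!buf)
        return 1;

    /* mirrors skb_reserve(skb, NET_IP_ALIGN): advance the data pointer by two bytes */
    uint8_t *data = buf + NET_IP_ALIGN;

    /* the IP header begins right after the Ethernet header */
    uint8_t *ip_hdr = data + ETH_HLEN;

    printf("IP header offset %td from buffer start, 4-byte aligned: %s\n",
           ip_hdr - buf, ((uintptr_t)ip_hdr % 4 == 0) ? "yes" : "no");

    free(buf);
    return 0;
}
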
@@ -194,13 +194,13 @@ static int desc_list_init(void)
                struct dma_descriptor *b = &(r->desc_b);
                /* allocate a new skb for next time receive */
-               new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+               new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
                if (!new_skb) {
                        printk(KERN_NOTICE DRV_NAME
                               ": init: low on mem - packet dropped\n");
                        goto init_error;
                }
-               skb_reserve(new_skb, 2);
+               skb_reserve(new_skb, NET_IP_ALIGN);
                r->skb = new_skb;
                /*
@@ -566,9 +566,9 @@ static void adjust_tx_list(void)
         */
        if (current_tx_ptr->next->next == tx_list_head) {
                while (tx_list_head->status.status_word == 0) {
-                       mdelay(1);
+                       udelay(10);
                        if (tx_list_head->status.status_word != 0
-                           || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
+                           || !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
                                goto adjust_head;
                        }
                        if (timeout_cnt-- < 0) {
@@ -606,18 +606,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *dev)
 {
        u16 *data;
+       u32 data_align = (unsigned long)(skb->data) & 0x3;
        current_tx_ptr->skb = skb;
-       if (ANOMALY_05000285) {
-               /*
-                * TXDWA feature is not avaible to older revision < 0.3 silicon
-                * of BF537
-                *
-                * Only if data buffer is ODD WORD alignment, we do not
-                * need to memcpy
-                */
-               u32 data_align = (u32)(skb->data) & 0x3;
        if (data_align == 0x2) {
                /* move skb->data to current_tx_ptr payload */
                data = (u16 *)(skb->data) - 1;
@@ -638,55 +629,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
                        (u32)current_tx_ptr->packet,
                        (u32)(current_tx_ptr->packet + skb->len + 2));
        }
-       } else {
-               /*
-                * TXDWA feature is avaible to revision < 0.3 silicon of
-                * BF537 and always avaible to BF52x
-                */
-               u32 data_align = (u32)(skb->data) & 0x3;
-               if (data_align == 0x0) {
-                       u16 sysctl = bfin_read_EMAC_SYSCTL();
-                       sysctl |= TXDWA;
-                       bfin_write_EMAC_SYSCTL(sysctl);
-                       /* move skb->data to current_tx_ptr payload */
-                       data = (u16 *)(skb->data) - 2;
-                       *data = (u16)(skb->len);
-                       current_tx_ptr->desc_a.start_addr = (u32)data;
-                       /* this is important! */
-                       blackfin_dcache_flush_range(
-                               (u32)data,
-                               (u32)((u8 *)data + skb->len + 4));
-               } else if (data_align == 0x2) {
-                       u16 sysctl = bfin_read_EMAC_SYSCTL();
-                       sysctl &= ~TXDWA;
-                       bfin_write_EMAC_SYSCTL(sysctl);
-                       /* move skb->data to current_tx_ptr payload */
-                       data = (u16 *)(skb->data) - 1;
-                       *data = (u16)(skb->len);
-                       current_tx_ptr->desc_a.start_addr = (u32)data;
-                       /* this is important! */
-                       blackfin_dcache_flush_range(
-                               (u32)data,
-                               (u32)((u8 *)data + skb->len + 4));
-               } else {
-                       u16 sysctl = bfin_read_EMAC_SYSCTL();
-                       sysctl &= ~TXDWA;
-                       bfin_write_EMAC_SYSCTL(sysctl);
-                       *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
-                       memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
-                               skb->len);
-                       current_tx_ptr->desc_a.start_addr =
-                               (u32)current_tx_ptr->packet;
-                       if (current_tx_ptr->status.status_word != 0)
-                               current_tx_ptr->status.status_word = 0;
-                       blackfin_dcache_flush_range(
-                               (u32)current_tx_ptr->packet,
-                               (u32)(current_tx_ptr->packet + skb->len + 2));
-               }
-       }
        /* make sure the internal data buffers in the core are drained
         * so that the DMA descriptors are completely written when the
@@ -698,7 +640,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
        current_tx_ptr->desc_a.config |= DMAEN;
        /* tx dma is running, just return */
-       if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
+       if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
                goto out;
        /* tx dma is not running */
@@ -724,7 +666,7 @@ static void bfin_mac_rx(struct net_device *dev)
        /* allocate a new skb for next time receive */
        skb = current_rx_ptr->skb;
-       new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+       new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
        if (!new_skb) {
                printk(KERN_NOTICE DRV_NAME
                       ": rx: low on mem - packet dropped\n");
@@ -732,7 +674,7 @@ static void bfin_mac_rx(struct net_device *dev)
                goto out;
        }
        /* reserve 2 bytes for RXDWA padding */
-       skb_reserve(new_skb, 2);
+       skb_reserve(new_skb, NET_IP_ALIGN);
        current_rx_ptr->skb = new_skb;
        current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
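
Separately, the adjust_tx_list() hunk above shortens the per-iteration delay in a bounded busy-wait from mdelay(1) to udelay(10): the loop keeps polling a DMA status word and gives up once a timeout counter expires. A rough userspace approximation of that polling pattern, with hypothetical names, delay, and timeout (the real loop also checks the DMA_RUN bit):

/*
 * Rough userspace approximation of the bounded busy-wait in
 * adjust_tx_list() above: poll a status word, pausing ~10 us per
 * pass, and give up after a fixed number of iterations.  All names,
 * the delay, and the timeout count are illustrative, not driver code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static volatile unsigned int status_word;   /* stands in for tx_list_head->status.status_word */

static bool poll_status_with_timeout(int timeout_cnt)
{
    while (status_word == 0) {
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 10 * 1000 };
        nanosleep(&ts, NULL);                /* plays the role of udelay(10) */
        if (timeout_cnt-- < 0) {
            fprintf(stderr, "timed out waiting for DMA completion\n");
            return false;
        }
    }
    return true;
}

int main(void)
{
    status_word = 1;                         /* pretend the hardware already finished */
    printf("completed: %s\n", poll_status_with_timeout(500) ? "yes" : "no");
    return 0;
}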