Commit 9914cad5 authored by Al Viro, committed by Jeff Garzik

3c359 endianness annotations and fixes

Same story as with olympic - htons(readw()) when swab16(readw()) is needed,
missing conversions to le32 when dealing with shared descriptors, etc.
Olympic got those fixes in 2.4.0-test2, 3c359 didn't.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent cc154ac6
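The pattern behind most of these hunks: readw() already converts the little-endian value on the PCI bus into CPU byte order, and the words the adapter keeps in its shared memory are byte-swapped on top of that, so the driver needs an unconditional swab16(). The old ntohs()/htons() calls only happened to do the same thing on little-endian hosts and compile to a no-op on big-endian ones. A minimal sketch of the fixed access idiom (xl_read_mac_word() is a hypothetical helper, not part of the patch; xl_mmio and MMIO_MACDATA are the driver's existing names):

#include <linux/types.h>
#include <asm/io.h>		/* readw() */
#include <asm/byteorder.h>	/* swab16(), cpu_to_le16() */
#include "3c359.h"		/* MMIO_MACDATA */

/* Hypothetical helper showing the swab16(readw()) idiom this patch adopts. */
static u16 xl_read_mac_word(u8 __iomem *xl_mmio)
{
	/*
	 * Old code:  ntohs(readw(...))  - a no-op on big-endian hosts.
	 * Fixed:     swab16(readw(...)) - swaps on every host, which is what
	 *            the adapter's byte-swapped shared memory requires.
	 */
	return swab16(readw(xl_mmio + MMIO_MACDATA));
}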
@@ -570,7 +570,7 @@ static int xl_open(struct net_device *dev)
 	struct xl_private *xl_priv=netdev_priv(dev);
 	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
 	u8 i ;
-	u16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
+	__le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
 	int open_err ;
 	u16 switchsettings, switchsettings_eeprom ;
@@ -580,15 +580,12 @@ static int xl_open(struct net_device *dev)
 	}
 	/*
-	 * Read the information from the EEPROM that we need. I know we
-	 * should use ntohs, but the word gets stored reversed in the 16
-	 * bit field anyway and it all works its self out when we memcpy
-	 * it into dev->dev_addr.
+	 * Read the information from the EEPROM that we need.
 	 */
-	hwaddr[0] = xl_ee_read(dev,0x10) ;
-	hwaddr[1] = xl_ee_read(dev,0x11) ;
-	hwaddr[2] = xl_ee_read(dev,0x12) ;
+	hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10));
+	hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11));
+	hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12));
 	/* Ring speed */
@@ -665,8 +662,8 @@ static int xl_open(struct net_device *dev)
 			break ;
 		skb->dev = dev ;
-		xl_priv->xl_rx_ring[i].upfragaddr = pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
-		xl_priv->xl_rx_ring[i].upfraglen = xl_priv->pkt_buf_sz | RXUPLASTFRAG;
+		xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
+		xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
 		xl_priv->rx_ring_skb[i] = skb ;
 	}
@@ -680,7 +677,7 @@ static int xl_open(struct net_device *dev)
 	xl_priv->rx_ring_tail = 0 ;
 	xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ;
 	for (i=0;i<(xl_priv->rx_ring_no-1);i++) {
-		xl_priv->xl_rx_ring[i].upnextptr = xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)) ;
+		xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)));
 	}
 	xl_priv->xl_rx_ring[i].upnextptr = 0 ;
@@ -698,7 +695,7 @@ static int xl_open(struct net_device *dev)
 	 * Setup the first dummy DPD entry for polling to start working.
 	 */
-	xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY ;
+	xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY;
 	xl_priv->xl_tx_ring[0].buffer = 0 ;
 	xl_priv->xl_tx_ring[0].buffer_length = 0 ;
 	xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
@@ -811,17 +808,17 @@ static int xl_open_hw(struct net_device *dev)
 		return open_err ;
 	} else {
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		xl_priv->asb = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 		printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ;
 		printk("ASB: %04x",xl_priv->asb ) ;
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		printk(", SRB: %04x",ntohs(readw(xl_mmio + MMIO_MACDATA)) ) ;
+		printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ;
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		xl_priv->arb = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 		printk(", ARB: %04x \n",xl_priv->arb ) ;
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		vsoff = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 		/*
 		 * Interesting, sending the individual characters directly to printk was causing klogd to use
@@ -873,16 +870,15 @@ static int xl_open_hw(struct net_device *dev)
 static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */
 {
 	struct xl_private *xl_priv=netdev_priv(dev);
-	int prev_ring_loc ;
-
-	prev_ring_loc = (xl_priv->rx_ring_tail + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
-	xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * xl_priv->rx_ring_tail) ;
-	xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus = 0 ;
-	xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upnextptr = 0 ;
-	xl_priv->rx_ring_tail++ ;
-	xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1) ;
-	return ;
+	int n = xl_priv->rx_ring_tail;
+	int prev_ring_loc;
+
+	prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
+	xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n));
+	xl_priv->xl_rx_ring[n].framestatus = 0;
+	xl_priv->xl_rx_ring[n].upnextptr = 0;
+	xl_priv->rx_ring_tail++;
+	xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1);
 }
 
 static void xl_rx(struct net_device *dev)
@@ -914,7 +910,7 @@ static void xl_rx(struct net_device *dev)
 			temp_ring_loc &= (XL_RX_RING_SIZE-1) ;
 		}
-		frame_length = xl_priv->xl_rx_ring[temp_ring_loc].framestatus & 0x7FFF ;
+		frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF;
 		skb = dev_alloc_skb(frame_length) ;
@@ -931,29 +927,29 @@ static void xl_rx(struct net_device *dev)
 		}
 		while (xl_priv->rx_ring_tail != temp_ring_loc) {
-			copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ;
+			copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF;
 			frame_length -= copy_len ;
-			pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+			pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 			skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
 						  skb_put(skb, copy_len),
 						  copy_len);
-			pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+			pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 			adv_rx_ring(dev) ;
 		}
 		/* Now we have found the last fragment */
-		pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+		pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
 					  skb_put(skb,copy_len), frame_length);
 		/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
-		pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+		pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 		adv_rx_ring(dev) ;
 		skb->protocol = tr_type_trans(skb,dev) ;
 		netif_rx(skb) ;
 	} else { /* Single Descriptor Used, simply swap buffers over, fast path */
-		frame_length = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & 0x7FFF ;
+		frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF;
 		skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
@@ -966,13 +962,13 @@ static void xl_rx(struct net_device *dev)
 		}
 		skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
-		pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+		pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
 		skb_put(skb2, frame_length) ;
 		skb2->protocol = tr_type_trans(skb2,dev) ;
 		xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ;
-		xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
-		xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = xl_priv->pkt_buf_sz | RXUPLASTFRAG ;
+		xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
+		xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
 		adv_rx_ring(dev) ;
 		xl_priv->xl_stats.rx_packets++ ;
 		xl_priv->xl_stats.rx_bytes += frame_length ;
@@ -1022,7 +1018,7 @@ static void xl_freemem(struct net_device *dev)
 	for (i=0;i<XL_RX_RING_SIZE;i++) {
 		dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ;
-		pci_unmap_single(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
+		pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
 		xl_priv->rx_ring_tail++ ;
 		xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1;
 	}
@@ -1181,9 +1177,9 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
 		txd = &(xl_priv->xl_tx_ring[tx_head]) ;
 		txd->dnnextptr = 0 ;
-		txd->framestartheader = skb->len | TXDNINDICATE ;
-		txd->buffer = pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE) ;
-		txd->buffer_length = skb->len | TXDNFRAGLAST ;
+		txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE;
+		txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
+		txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;
 		xl_priv->tx_ring_skb[tx_head] = skb ;
 		xl_priv->xl_stats.tx_packets++ ;
 		xl_priv->xl_stats.tx_bytes += skb->len ;
@@ -1199,7 +1195,7 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
 		xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
 		xl_priv->free_ring_entries-- ;
-		xl_priv->xl_tx_ring[tx_prev].dnnextptr = xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head) ;
+		xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head));
 		/* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */
 		/* readl(xl_mmio + MMIO_DNLISTPTR) ; */
@@ -1237,9 +1233,9 @@ static void xl_dn_comp(struct net_device *dev)
 	while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) {
 		txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
-		pci_unmap_single(xl_priv->pdev,txd->buffer, xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE) ;
+		pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE);
 		txd->framestartheader = 0 ;
-		txd->buffer = 0xdeadbeef ;
+		txd->buffer = cpu_to_le32(0xdeadbeef);
 		txd->buffer_length = 0 ;
 		dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
 		xl_priv->tx_ring_tail++ ;
@@ -1507,9 +1503,9 @@ static void xl_arb_cmd(struct net_device *dev)
 	if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */
 		writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, ntohs(readw(xl_mmio + MMIO_MACDATA) )) ;
-		lan_status = ntohs(readw(xl_mmio + MMIO_MACDATA));
+		printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + MMIO_MACDATA) )) ;
+		lan_status = swab16(readw(xl_mmio + MMIO_MACDATA));
 		/* Acknowledge interrupt, this tells nic we are done with the arb */
 		writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
@@ -1573,7 +1569,7 @@ static void xl_arb_cmd(struct net_device *dev)
 		printk(KERN_INFO "Received.Data \n") ;
 #endif
 		writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		xl_priv->mac_buffer = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 		/* Now we are going to be really basic here and not do anything
 		 * with the data at all. The tech docs do not give me enough
@@ -1634,7 +1630,7 @@ static void xl_asb_cmd(struct net_device *dev)
 	writeb(0x81, xl_mmio + MMIO_MACDATA) ;
 	writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-	writew(ntohs(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
+	writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
 	xl_wait_misr_flags(dev) ;
......
@@ -156,19 +156,19 @@
 #define HOSTERRINT (1<<1)
 /* Receive descriptor bits */
-#define RXOVERRUN (1<<19)
-#define RXFC (1<<21)
-#define RXAR (1<<22)
-#define RXUPDCOMPLETE (1<<23)
-#define RXUPDFULL (1<<24)
-#define RXUPLASTFRAG (1<<31)
+#define RXOVERRUN cpu_to_le32(1<<19)
+#define RXFC cpu_to_le32(1<<21)
+#define RXAR cpu_to_le32(1<<22)
+#define RXUPDCOMPLETE cpu_to_le32(1<<23)
+#define RXUPDFULL cpu_to_le32(1<<24)
+#define RXUPLASTFRAG cpu_to_le32(1<<31)
 /* Transmit descriptor bits */
-#define TXDNCOMPLETE (1<<16)
-#define TXTXINDICATE (1<<27)
-#define TXDPDEMPTY (1<<29)
-#define TXDNINDICATE (1<<31)
-#define TXDNFRAGLAST (1<<31)
+#define TXDNCOMPLETE cpu_to_le32(1<<16)
+#define TXTXINDICATE cpu_to_le32(1<<27)
+#define TXDPDEMPTY cpu_to_le32(1<<29)
+#define TXDNINDICATE cpu_to_le32(1<<31)
+#define TXDNFRAGLAST cpu_to_le32(1<<31)
 /* Interrupts to Acknowledge */
 #define LATCH_ACK 1
@@ -232,17 +232,17 @@
 /* 3c359 data structures */
 struct xl_tx_desc {
-	u32 dnnextptr ;
-	u32 framestartheader ;
-	u32 buffer ;
-	u32 buffer_length ;
+	__le32 dnnextptr;
+	__le32 framestartheader;
+	__le32 buffer;
+	__le32 buffer_length;
 };
 struct xl_rx_desc {
-	u32 upnextptr ;
-	u32 framestatus ;
-	u32 upfragaddr ;
-	u32 upfraglen ;
+	__le32 upnextptr;
+	__le32 framestatus;
+	__le32 upfragaddr;
+	__le32 upfraglen;
 };
 struct xl_private {
......
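For the DMA descriptors, the header change above is the other half of the fix: the adapter interprets struct xl_tx_desc / struct xl_rx_desc in host memory as little-endian, so the fields become __le32 and every CPU-side access has to go through cpu_to_le32()/le32_to_cpu(). A small self-contained sketch of that idea follows (the struct and helpers here are illustrative, not taken from the patch; only the field layout mirrors struct xl_rx_desc). With __le32 fields, building with sparse endianness checking (-D__CHECK_ENDIAN__ in kernels of this vintage) flags any assignment that forgets the conversion.

#include <linux/types.h>
#include <asm/byteorder.h>

#define EX_RXUPLASTFRAG cpu_to_le32(1<<31)	/* mirrors RXUPLASTFRAG above */

struct ex_rx_desc {			/* same layout as struct xl_rx_desc */
	__le32 upnextptr;
	__le32 framestatus;
	__le32 upfragaddr;
	__le32 upfraglen;
};

/* Store side: convert CPU values to little-endian before the device sees them. */
static void ex_fill_rx_desc(struct ex_rx_desc *d, dma_addr_t buf, u32 len)
{
	d->upfragaddr = cpu_to_le32(buf);
	d->upfraglen  = cpu_to_le32(len) | EX_RXUPLASTFRAG;
}

/* Load side: convert back to CPU order before using the value. */
static u32 ex_frame_length(const struct ex_rx_desc *d)
{
	return le32_to_cpu(d->framestatus) & 0x7FFF;
}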