Commit 84bc69a1 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/jgarzik/net-drivers-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents b5939deb d857e540
@@ -980,7 +980,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
dev->irq = pdev->irq; dev->irq = pdev->irq;
/* dev->priv/tp zeroed and aligned in init_etherdev */ /* dev->priv/tp zeroed and aligned in alloc_etherdev */
tp = dev->priv; tp = dev->priv;
/* note: tp->chipset set in rtl8139_init_board */ /* note: tp->chipset set in rtl8139_init_board */
@@ -2143,9 +2143,7 @@ static int rtl8139_close (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags); spin_unlock_irqrestore (&tp->lock, flags);
/* TODO: isn't this code racy? we synchronize the IRQ and then free it, */ synchronize_irq (dev->irq); /* racy, but that's ok here */
/* but another IRQ could've happened in between the sync and free */
synchronize_irq (dev->irq);
free_irq (dev->irq, dev); free_irq (dev->irq, dev);
rtl8139_tx_clear (tp); rtl8139_tx_clear (tp);
...
/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
* Copyright (C) 2002 Advanced Micro Devices * Copyright (C) 2003 Advanced Micro Devices
*
*
* Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
@@ -41,6 +41,18 @@ Module Name:
Kernel Mode
Revision History:
3.0.0
Initial Revision.
3.0.1
1. Dynamic interrupt coalescing.
2. Removed prev_stats.
3. MII support.
4. Dynamic IPG support
3.0.2 05/29/2003
1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
2. Bug fix: Fixed VLAN support failure.
3. Bug fix: Fixed receive interrupt coalescing bug.
4. Dynamic IPG support is disabled by default.
*/ */
@@ -77,13 +89,16 @@ Revision History:
#include "amd8111e.h"
#define MODULE_NAME "amd8111e"
#define MODULE_VERSION "3.0.0" #define MODULE_VERSION "3.0.2"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.0"); MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.2");
MODULE_LICENSE("GPL");
MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
MODULE_PARM(coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
MODULE_PARM(dynamic_ipg, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
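/* Usage sketch (not part of the driver; the module file name and load syntax
 * are assumptions): the three parameters above are per-adapter arrays of up
 * to MAX_UNITS entries, so a hypothetical load line such as
 *
 *     modprobe amd8111e speed_duplex=4 coalesce=1 dynamic_ipg=0
 *
 * would force 100Mbps Full Duplex, enable interrupt coalescing and leave
 * dynamic IPG disabled on the first adapter; comma-separated values would
 * configure additional adapters.
 */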
static struct pci_device_id amd8111e_pci_tbl[] __devinitdata = { static struct pci_device_id amd8111e_pci_tbl[] __devinitdata = {
@@ -92,6 +107,88 @@ static struct pci_device_id amd8111e_pci_tbl[] __devinitdata = {
{ 0, } { 0, }
}; };
/*
This function will read the PHY registers.
*/
static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
{
void * mmio = lp->mmio;
unsigned int reg_val;
unsigned int repeat= REPEAT_CNT;
reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE)
reg_val = readl( mmio + PHY_ACCESS );
writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16), mmio +PHY_ACCESS);
do{
reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
if(reg_val & PHY_RD_ERR)
goto err_phy_read;
*val = reg_val & 0xffff;
return 0;
err_phy_read:
*val = 0;
return -EINVAL;
}
/*
This function will write into PHY registers.
*/
static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
{
unsigned int repeat = REPEAT_CNT;
void * mmio = lp->mmio;
unsigned int reg_val;
reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE)
reg_val = readl( mmio + PHY_ACCESS );
writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
do{
reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write the data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
if(reg_val & PHY_RD_ERR)
goto err_phy_write;
return 0;
err_phy_write:
return -EINVAL;
}
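/* Note on the two helpers above: both drive the single PHY_ACCESS register;
 * the PHY address is placed in bits 25:21, the register number in bits 20:16
 * and, for writes, the data in bits 15:0. PHY_CMD_ACTIVE is polled (up to
 * REPEAT_CNT times, 30 us apart) until the MAC completes the MII cycle, and
 * PHY_RD_ERR marks a failed transaction.
 */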
/*
This is the mii register read function provided to the mii interface.
*/
static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
{
struct amd8111e_priv* lp = dev->priv;
unsigned int reg_val;
amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
return reg_val;
}
/*
This is the mii register write function provided to the mii interface.
*/
static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
{
struct amd8111e_priv* lp = dev->priv;
amd8111e_write_phy(lp, phy_id, reg_num, val);
}
/* /*
This function will set PHY speed. During initialization, it sets the original speed to 100 full.
@@ -99,26 +196,39 @@ This function will set PHY speed. During initialization sets the original speed
static void amd8111e_set_ext_phy(struct net_device *dev) static void amd8111e_set_ext_phy(struct net_device *dev)
{ {
struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv; struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
unsigned long reg_val = 0; u32 bmcr,advert,tmp;
void * mmio = lp->mmio;
struct amd8111e_link_config *link_config = &lp->link_config;
if(!lp->opened){ /* Determine mii register values to set the speed */
/* Initializing SPEED_100 and DUPLEX_FULL as original values */ advert = amd8111e_mdio_read(dev, PHY_ID, MII_ADVERTISE);
link_config->orig_speed = SPEED_100; tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
link_config->orig_duplex = DUPLEX_FULL; switch (lp->ext_phy_option){
link_config->orig_phy_option = XPHYSP |XPHYFD;
default:
case SPEED_AUTONEG: /* advertise all values */
tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
ADVERTISE_100HALF|ADVERTISE_100FULL) ;
break;
case SPEED10_HALF:
tmp |= ADVERTISE_10HALF;
break;
case SPEED10_FULL:
tmp |= ADVERTISE_10FULL;
break;
case SPEED100_HALF:
tmp |= ADVERTISE_100HALF;
break;
case SPEED100_FULL:
tmp |= ADVERTISE_100FULL;
break;
} }
reg_val = lp->ext_phy_option;
/* Disable port manager */
writel((u32) EN_PMGR, mmio + CMD3 );
/* Reset PHY */ if(advert != tmp)
writel((u32)XPHYRST | lp->ext_phy_option, mmio + CTRL2); amd8111e_mdio_write(dev, PHY_ID, MII_ADVERTISE, tmp);
/* Restart auto negotiation */
bmcr = amd8111e_mdio_read(dev, PHY_ID, MII_BMCR);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
amd8111e_mdio_write(dev, PHY_ID, MII_BMCR, bmcr);
/* Enable port manager */
writel((u32)VAL1 | EN_PMGR, mmio + CMD3 );
} }
/* /*
@@ -156,7 +266,7 @@ static int amd8111e_free_skbs(struct net_device *dev)
} }
/* /*
This will set the receive buffer length corresponding to the mtu size of the network interface.
*/ */
static inline void amd8111e_set_rx_buff_len(struct net_device* dev) static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
{ {
@@ -226,13 +336,13 @@ static int amd8111e_init_ring(struct net_device *dev)
lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]); lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len); lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len);
lp->rx_ring[i].rx_dr_offset10 = cpu_to_le16(OWN_BIT); lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
} }
/* Initializing transmit descriptors */ /* Initializing transmit descriptors */
for (i = 0; i < NUM_TX_RING_DR; i++) { for (i = 0; i < NUM_TX_RING_DR; i++) {
lp->tx_ring[i].buff_phy_addr = 0; lp->tx_ring[i].buff_phy_addr = 0;
lp->tx_ring[i].tx_dr_offset2 = 0; lp->tx_ring[i].tx_flags = 0;
lp->tx_ring[i].buff_count = 0; lp->tx_ring[i].buff_count = 0;
} }
@@ -253,6 +363,65 @@ static int amd8111e_init_ring(struct net_device *dev)
err_no_mem: err_no_mem:
return -ENOMEM; return -ENOMEM;
} }
/* This function will set the interrupt coalescing according to the input arguments */
static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
{
unsigned int timeout;
unsigned int event_count;
struct amd8111e_priv *lp = dev->priv;
void* mmio = lp->mmio;
struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
switch(cmod)
{
case RX_INTR_COAL :
timeout = coal_conf->rx_timeout;
event_count = coal_conf->rx_event_count;
if( timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT )
return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
writel(VAL0|STINTEN, mmio+INTEN0);
writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
mmio+DLY_INT_A);
break;
case TX_INTR_COAL :
timeout = coal_conf->tx_timeout;
event_count = coal_conf->tx_event_count;
if( timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT )
return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
writel(VAL0|STINTEN,mmio+INTEN0);
writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
mmio+DLY_INT_B);
break;
case DISABLE_COAL:
writel(0,mmio+STVAL);
writel(STINTEN, mmio+INTEN0);
writel(0, mmio +DLY_INT_B);
writel(0, mmio+DLY_INT_A);
break;
case ENABLE_COAL:
/* Start the timer */
writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
writel(VAL0|STINTEN, mmio+INTEN0);
break;
default:
break;
}
return 0;
}
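/* Worked example of the register programming above (using the LOW_COALESCE
 * receive values set later in amd8111e_calc_coalesce): rx_timeout = 1 and
 * rx_event_count = 4 result in
 *
 *     writel((u32)DLY_INT_A_R0 | (4 << 16) | (1 * DELAY_TIMER_CONV),
 *            mmio + DLY_INT_A);
 *
 * i.e. DLY_INT_A_R0 is OR'd with the event count shifted into the upper
 * half-word and the timeout scaled by DELAY_TIMER_CONV in the low bits,
 * after the range checks against MAX_EVENT_COUNT and MAX_TIMEOUT.
 */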
/*
This function initializes the device registers and starts the device.
*/
@@ -267,13 +436,17 @@ static int amd8111e_restart(struct net_device *dev)
if(amd8111e_init_ring(dev)) if(amd8111e_init_ring(dev))
return -ENOMEM; return -ENOMEM;
/* enable the port manager and set auto negotiation always */
writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
amd8111e_set_ext_phy(dev); amd8111e_set_ext_phy(dev);
/* set control registers */ /* set control registers */
reg_val = readl(mmio + CTRL1); reg_val = readl(mmio + CTRL1);
reg_val &= ~XMTSP_MASK;
writel( reg_val| XMTSP_128 | CACHE_ALIGN | B1_MASK, mmio + CTRL1 ); writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
/* enable interrupt */ /* enable interrupt */
writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
@@ -288,15 +461,21 @@ static int amd8111e_restart(struct net_device *dev)
writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0); writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0); writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
/* set default IPG to 96 */
writew((u32)DEFAULT_IPG,mmio+IPG);
writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
if(lp->options & OPTION_JUMBO_ENABLE){ if(lp->options & OPTION_JUMBO_ENABLE){
writel((u32)VAL2|JUMBO, mmio + CMD3); writel((u32)VAL2|JUMBO, mmio + CMD3);
/* Reset REX_UFLO */ /* Reset REX_UFLO */
writel( REX_UFLO, mmio + CMD2); writel( REX_UFLO, mmio + CMD2);
/* Should not set REX_UFLO for jumbo frames */ /* Should not set REX_UFLO for jumbo frames */
writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2); writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
}else }else{
writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2); writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
writel((u32)JUMBO, mmio + CMD3);
}
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3); writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
@@ -306,11 +485,20 @@ static int amd8111e_restart(struct net_device *dev)
/* Setting the MAC address to the device */ /* Setting the MAC address to the device */
for(i = 0; i < ETH_ADDR_LEN; i++) for(i = 0; i < ETH_ADDR_LEN; i++)
writeb( dev->dev_addr[i], mmio + PADR + i ); writeb( dev->dev_addr[i], mmio + PADR + i );
/* Enable interrupt coalesce */
if(lp->options & OPTION_INTR_COAL_ENABLE){
printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
dev->name);
amd8111e_set_coalesce(dev,ENABLE_COAL);
}
/* set RUN bit to start the chip */ /* set RUN bit to start the chip */
writel(VAL2 | RDMD0, mmio + CMD0); writel(VAL2 | RDMD0, mmio + CMD0);
writel(VAL0 | INTREN | RUN, mmio + CMD0); writel(VAL0 | INTREN | RUN, mmio + CMD0);
/* To avoid PCI posting bug */
readl(mmio+CMD0);
return 0; return 0;
} }
/* /*
@@ -383,7 +571,7 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
writew(MIB_CLEAR, mmio + MIB_ADDR); writew(MIB_CLEAR, mmio + MIB_ADDR);
/* Clear LARF */ /* Clear LARF */
AMD8111E_WRITE_REG64(mmio, LADRF,logic_filter); amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
/* SRAM_SIZE register */ /* SRAM_SIZE register */
reg_val = readl(mmio + SRAM_SIZE); reg_val = readl(mmio + SRAM_SIZE);
@@ -393,8 +581,11 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 ); writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
#endif #endif
/* CMD2 register */ /* Set default value to CTRL1 Register */
reg_val = readl(mmio + CMD2); writel(CTRL1_DEFAULT, mmio + CTRL1);
/* To avoid PCI posting bug */
readl(mmio + CMD2);
} }
@@ -412,6 +603,9 @@ static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
/* Clear INT0 */ /* Clear INT0 */
intr0 = readl(lp->mmio + INT0); intr0 = readl(lp->mmio + INT0);
writel(intr0, lp->mmio + INT0); writel(intr0, lp->mmio + INT0);
/* To avoid PCI posting bug */
readl(lp->mmio + INT0);
} }
@@ -421,6 +615,9 @@ This function stops the chip.
static void amd8111e_stop_chip(struct amd8111e_priv* lp) static void amd8111e_stop_chip(struct amd8111e_priv* lp)
{ {
writel(RUN, lp->mmio + CMD0); writel(RUN, lp->mmio + CMD0);
/* To avoid PCI posting bug */
readl(lp->mmio + CMD0);
} }
/* /*
@@ -467,11 +664,10 @@ static int amd8111e_tx(struct net_device *dev)
struct amd8111e_priv* lp = dev->priv; struct amd8111e_priv* lp = dev->priv;
int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
int status; int status;
/* Complete all the transmit packets */
while (lp->tx_complete_idx != lp->tx_idx){ while (lp->tx_complete_idx != lp->tx_idx){
tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
status = le16_to_cpu(lp->tx_ring[tx_index].tx_dr_offset2); status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
if(status & OWN_BIT) if(status & OWN_BIT)
break; /* It still hasn't been Txed */ break; /* It still hasn't been Txed */
@@ -487,11 +683,15 @@ static int amd8111e_tx(struct net_device *dev)
lp->tx_skbuff[tx_index] = 0; lp->tx_skbuff[tx_index] = 0;
lp->tx_dma_addr[tx_index] = 0; lp->tx_dma_addr[tx_index] = 0;
} }
lp->tx_complete_idx++; lp->tx_complete_idx++;
/*COAL update tx coalescing parameters */
lp->coal_conf.tx_packets++;
lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count;
if (netif_queue_stopped(dev) && if (netif_queue_stopped(dev) &&
lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
/* The ring is no longer full, clear tbusy. */ /* The ring is no longer full, clear tbusy. */
/* lp->tx_full = 0; */
netif_wake_queue (dev); netif_wake_queue (dev);
} }
} }
@@ -516,33 +716,31 @@ static int amd8111e_rx(struct net_device *dev)
/* If we own the next entry, it's a new packet. Send it up. */ /* If we own the next entry, it's a new packet. Send it up. */
while(++num_rx_pkt <= max_rx_pkt){ while(++num_rx_pkt <= max_rx_pkt){
if(lp->rx_ring[rx_index].rx_dr_offset10 & OWN_BIT) if(lp->rx_ring[rx_index].rx_flags & OWN_BIT)
return 0; return 0;
/* check if err summary bit is set */ /* check if err summary bit is set */
if(le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10) & ERR_BIT){ if(le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & ERR_BIT){
/*
* There is a tricky error noted by John Murphy,
* <murf@perftech.com> to Russ Nelson: Even with full-sized
* buffers it's possible for a jabber packet to use two
* buffers, with only the last correctly noting the error. */
/* resetting flags */
lp->rx_ring[rx_index].rx_dr_offset10 &= lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
cpu_to_le16(RESET_RX_FLAGS);
goto err_next_pkt; goto err_next_pkt;
} }
/* check for STP and ENP */ /* check for STP and ENP */
status = le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10); status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
if(!((status & STP_BIT) && (status & ENP_BIT))){ if(!((status & STP_BIT) && (status & ENP_BIT))){
/* resetting flags */
lp->rx_ring[rx_index].rx_dr_offset10 &= lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
cpu_to_le16(RESET_RX_FLAGS);
goto err_next_pkt; goto err_next_pkt;
} }
pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10) & TT_MASK; vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & TT_MASK;
/*MAC will strip vlan tag*/ /*MAC will strip vlan tag*/
if(lp->vlgrp != NULL && vtag !=0) if(lp->vlgrp != NULL && vtag !=0)
min_pkt_len =MIN_PKT_LEN - 4; min_pkt_len =MIN_PKT_LEN - 4;
@@ -551,16 +749,14 @@ static int amd8111e_rx(struct net_device *dev)
min_pkt_len =MIN_PKT_LEN; min_pkt_len =MIN_PKT_LEN;
if (pkt_len < min_pkt_len) { if (pkt_len < min_pkt_len) {
lp->rx_ring[rx_index].rx_dr_offset10 &= lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
cpu_to_le16(RESET_RX_FLAGS);
lp->stats.rx_errors++; lp->stats.rx_errors++;
goto err_next_pkt; goto err_next_pkt;
} }
if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){ if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
/* if allocation fails,
ignore that pkt and go to next one */
lp->rx_ring[rx_index].rx_dr_offset10 &= lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
cpu_to_le16(RESET_RX_FLAGS);
lp->stats.rx_errors++; lp->stats.rx_errors++;
goto err_next_pkt; goto err_next_pkt;
} }
@@ -580,22 +776,26 @@ static int amd8111e_rx(struct net_device *dev)
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
vtag = lp->rx_ring[rx_index].rx_dr_offset10 & TT_MASK; vtag = lp->rx_ring[rx_index].rx_flags & TT_MASK;
if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){ if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
amd8111e_vlan_rx(lp, skb, amd8111e_vlan_rx(lp, skb,
lp->rx_ring[rx_index].tag_ctrl_info); lp->rx_ring[rx_index].tag_ctrl_info);
} else } else
#endif #endif
dev->last_rx = jiffies;
netif_rx (skb); netif_rx (skb);
/*COAL update rx coalescing parameters*/
lp->coal_conf.rx_packets++;
lp->coal_conf.rx_bytes += pkt_len;
dev->last_rx = jiffies;
err_next_pkt: err_next_pkt:
lp->rx_ring[rx_index].buff_phy_addr lp->rx_ring[rx_index].buff_phy_addr
= cpu_to_le32(lp->rx_dma_addr[rx_index]); = cpu_to_le32(lp->rx_dma_addr[rx_index]);
lp->rx_ring[rx_index].buff_count = lp->rx_ring[rx_index].buff_count =
cpu_to_le16(lp->rx_buff_len-2); cpu_to_le16(lp->rx_buff_len-2);
lp->rx_ring[rx_index].rx_dr_offset10 |= cpu_to_le16(OWN_BIT); lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
} }
@@ -603,8 +803,8 @@ static int amd8111e_rx(struct net_device *dev)
} }
/* /*
This function will store the original speed to restore later, if autoneg is turned on. This speed will be set later when the autoneg is turned off. If the link status indicates that link is down, that will be indicated to the kernel */ This function will indicate the link status to the kernel.
*/
static int amd8111e_link_change(struct net_device* dev) static int amd8111e_link_change(struct net_device* dev)
{ {
struct amd8111e_priv *lp = dev->priv; struct amd8111e_priv *lp = dev->priv;
@@ -614,21 +814,11 @@ static int amd8111e_link_change(struct net_device* dev)
status0 = readl(lp->mmio + STAT0); status0 = readl(lp->mmio + STAT0);
if(status0 & LINK_STATS){ if(status0 & LINK_STATS){
if(status0 & AUTONEG_COMPLETE){ if(status0 & AUTONEG_COMPLETE)
/* keeping the original speeds */
if((lp->link_config.speed != SPEED_INVALID)&&
(lp->link_config.duplex != DUPLEX_INVALID)){
lp->link_config.orig_speed = lp->link_config.speed;
lp->link_config.orig_duplex = lp->link_config.duplex;
lp->link_config.orig_phy_option = lp->ext_phy_option;
}
lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID;
lp->link_config.autoneg = AUTONEG_ENABLE; lp->link_config.autoneg = AUTONEG_ENABLE;
netif_carrier_on(dev); else
return 0; lp->link_config.autoneg = AUTONEG_DISABLE;
}
if(status0 & FULL_DPLX) if(status0 & FULL_DPLX)
lp->link_config.duplex = DUPLEX_FULL; lp->link_config.duplex = DUPLEX_FULL;
else else
@@ -638,13 +828,17 @@ static int amd8111e_link_change(struct net_device* dev)
lp->link_config.speed = SPEED_10; lp->link_config.speed = SPEED_10;
else if(speed == PHY_SPEED_100) else if(speed == PHY_SPEED_100)
lp->link_config.speed = SPEED_100; lp->link_config.speed = SPEED_100;
lp->link_config.autoneg = AUTONEG_DISABLE;
printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
(lp->link_config.speed == SPEED_100) ? "100": "10",
(lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
netif_carrier_on(dev); netif_carrier_on(dev);
} }
else{ else{
lp->link_config.speed = SPEED_INVALID; lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID; lp->link_config.duplex = DUPLEX_INVALID;
lp->link_config.autoneg = AUTONEG_INVALID; lp->link_config.autoneg = AUTONEG_INVALID;
printk(KERN_INFO "%s: Link is Down.\n",dev->name);
netif_carrier_off(dev); netif_carrier_off(dev);
} }
@@ -671,129 +865,250 @@ static int amd8111e_read_mib(void* mmio, u8 MIB_COUNTER)
} }
/* /*
This function retuurns the reads the mib registers and returns the hardware statistics. It adds the previous statistics with new values.*/ This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values.
*/
static struct net_device_stats *amd8111e_get_stats(struct net_device * dev) static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
{ {
struct amd8111e_priv *lp = dev->priv; struct amd8111e_priv *lp = dev->priv;
void * mmio = lp->mmio; void * mmio = lp->mmio;
unsigned long flags; unsigned long flags;
struct net_device_stats *prev_stats = &lp->prev_stats; /* struct net_device_stats *prev_stats = &lp->prev_stats; */
struct net_device_stats* new_stats = &lp->stats; struct net_device_stats* new_stats = &lp->stats;
if(!lp->opened) if(!lp->opened)
return prev_stats; return &lp->stats;
spin_lock_irqsave (&lp->lock, flags); spin_lock_irqsave (&lp->lock, flags);
/* stats.rx_packets */ /* stats.rx_packets */
new_stats->rx_packets = prev_stats->rx_packets+ new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
amd8111e_read_mib(mmio, rcv_broadcast_pkts)+ amd8111e_read_mib(mmio, rcv_multicast_pkts)+
amd8111e_read_mib(mmio, rcv_multicast_pkts)+ amd8111e_read_mib(mmio, rcv_unicast_pkts);
amd8111e_read_mib(mmio, rcv_unicast_pkts);
/* stats.tx_packets */ /* stats.tx_packets */
new_stats->tx_packets = prev_stats->tx_packets+ new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
amd8111e_read_mib(mmio, xmt_packets);
/*stats.rx_bytes */ /*stats.rx_bytes */
new_stats->rx_bytes = prev_stats->rx_bytes+ new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
amd8111e_read_mib(mmio, rcv_octets);
/* stats.tx_bytes */ /* stats.tx_bytes */
new_stats->tx_bytes = prev_stats->tx_bytes+ new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
amd8111e_read_mib(mmio, xmt_octets);
/* stats.rx_errors */ /* stats.rx_errors */
new_stats->rx_errors = prev_stats->rx_errors+ new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
amd8111e_read_mib(mmio, rcv_undersize_pkts)+ amd8111e_read_mib(mmio, rcv_fragments)+
amd8111e_read_mib(mmio, rcv_fragments)+ amd8111e_read_mib(mmio, rcv_jabbers)+
amd8111e_read_mib(mmio, rcv_jabbers)+ amd8111e_read_mib(mmio, rcv_alignment_errors)+
amd8111e_read_mib(mmio, rcv_alignment_errors)+ amd8111e_read_mib(mmio, rcv_fcs_errors)+
amd8111e_read_mib(mmio, rcv_fcs_errors)+ amd8111e_read_mib(mmio, rcv_miss_pkts);
amd8111e_read_mib(mmio, rcv_miss_pkts);
/* stats.tx_errors */ /* stats.tx_errors */
new_stats->tx_errors = prev_stats->tx_errors+ new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
amd8111e_read_mib(mmio, xmt_underrun_pkts);
/* stats.rx_dropped*/ /* stats.rx_dropped*/
new_stats->rx_dropped = prev_stats->rx_dropped+ new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
amd8111e_read_mib(mmio, rcv_miss_pkts);
/* stats.tx_dropped*/ /* stats.tx_dropped*/
new_stats->tx_dropped = prev_stats->tx_dropped+ new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
amd8111e_read_mib(mmio, xmt_underrun_pkts);
/* stats.multicast*/ /* stats.multicast*/
new_stats->multicast = prev_stats->multicast+ new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
amd8111e_read_mib(mmio, rcv_multicast_pkts);
/* stats.collisions*/ /* stats.collisions*/
new_stats->collisions = prev_stats->collisions+ new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
amd8111e_read_mib(mmio, xmt_collisions);
/* stats.rx_length_errors*/ /* stats.rx_length_errors*/
new_stats->rx_length_errors = prev_stats->rx_length_errors+ new_stats->rx_length_errors =
amd8111e_read_mib(mmio, rcv_undersize_pkts)+ amd8111e_read_mib(mmio, rcv_undersize_pkts)+
amd8111e_read_mib(mmio, rcv_oversize_pkts); amd8111e_read_mib(mmio, rcv_oversize_pkts);
/* stats.rx_over_errors*/ /* stats.rx_over_errors*/
new_stats->rx_over_errors = prev_stats->rx_over_errors+ new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
amd8111e_read_mib(mmio, rcv_miss_pkts);
/* stats.rx_crc_errors*/ /* stats.rx_crc_errors*/
new_stats->rx_crc_errors = prev_stats->rx_crc_errors+ new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
amd8111e_read_mib(mmio, rcv_fcs_errors);
/* stats.rx_frame_errors*/ /* stats.rx_frame_errors*/
new_stats->rx_frame_errors = prev_stats->rx_frame_errors+ new_stats->rx_frame_errors =
amd8111e_read_mib(mmio, rcv_alignment_errors); amd8111e_read_mib(mmio, rcv_alignment_errors);
/* stats.rx_fifo_errors */ /* stats.rx_fifo_errors */
new_stats->rx_fifo_errors = prev_stats->rx_fifo_errors+ new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
amd8111e_read_mib(mmio, rcv_miss_pkts);
/* stats.rx_missed_errors */ /* stats.rx_missed_errors */
new_stats->rx_missed_errors = prev_stats->rx_missed_errors+ new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
amd8111e_read_mib(mmio, rcv_miss_pkts);
/* stats.tx_aborted_errors*/ /* stats.tx_aborted_errors*/
new_stats->tx_aborted_errors = prev_stats->tx_aborted_errors+ new_stats->tx_aborted_errors =
amd8111e_read_mib(mmio, xmt_excessive_collision); amd8111e_read_mib(mmio, xmt_excessive_collision);
/* stats.tx_carrier_errors*/ /* stats.tx_carrier_errors*/
new_stats->tx_carrier_errors = prev_stats->tx_carrier_errors+ new_stats->tx_carrier_errors =
amd8111e_read_mib(mmio, xmt_loss_carrier); amd8111e_read_mib(mmio, xmt_loss_carrier);
/* stats.tx_fifo_errors*/ /* stats.tx_fifo_errors*/
new_stats->tx_fifo_errors = prev_stats->tx_fifo_errors+ new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
amd8111e_read_mib(mmio, xmt_underrun_pkts);
/* stats.tx_window_errors*/ /* stats.tx_window_errors*/
new_stats->tx_window_errors = prev_stats->tx_window_errors+ new_stats->tx_window_errors =
amd8111e_read_mib(mmio, xmt_late_collision); amd8111e_read_mib(mmio, xmt_late_collision);
/* Reset the mibs for collecting new statistics */
/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
spin_unlock_irqrestore (&lp->lock, flags); spin_unlock_irqrestore (&lp->lock, flags);
return new_stats; return new_stats;
} }
/* This function recalculates the interrupt coalescing mode on every interrupt
according to the data rate and the packet rate.
*/
static int amd8111e_calc_coalesce(struct net_device *dev)
{
struct amd8111e_priv *lp = dev->priv;
struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
int tx_pkt_rate;
int rx_pkt_rate;
int tx_data_rate;
int rx_data_rate;
int rx_pkt_size;
int tx_pkt_size;
tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
coal_conf->tx_prev_packets = coal_conf->tx_packets;
tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
coal_conf->rx_prev_packets = coal_conf->rx_packets;
rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
if(rx_pkt_rate < 800){
if(coal_conf->rx_coal_type != NO_COALESCE){
coal_conf->rx_timeout = 0x0;
coal_conf->rx_event_count = 0;
amd8111e_set_coalesce(dev,RX_INTR_COAL);
coal_conf->rx_coal_type = NO_COALESCE;
}
}
else{
rx_pkt_size = rx_data_rate/rx_pkt_rate;
if (rx_pkt_size < 128){
if(coal_conf->rx_coal_type != NO_COALESCE){
coal_conf->rx_timeout = 0;
coal_conf->rx_event_count = 0;
amd8111e_set_coalesce(dev,RX_INTR_COAL);
coal_conf->rx_coal_type = NO_COALESCE;
}
}
else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
if(coal_conf->rx_coal_type != LOW_COALESCE){
coal_conf->rx_timeout = 1;
coal_conf->rx_event_count = 4;
amd8111e_set_coalesce(dev,RX_INTR_COAL);
coal_conf->rx_coal_type = LOW_COALESCE;
}
}
else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
coal_conf->rx_timeout = 1;
coal_conf->rx_event_count = 4;
amd8111e_set_coalesce(dev,RX_INTR_COAL);
coal_conf->rx_coal_type = MEDIUM_COALESCE;
}
}
else if(rx_pkt_size >= 1024){
if(coal_conf->rx_coal_type != HIGH_COALESCE){
coal_conf->rx_timeout = 2;
coal_conf->rx_event_count = 3;
amd8111e_set_coalesce(dev,RX_INTR_COAL);
coal_conf->rx_coal_type = HIGH_COALESCE;
}
}
}
/* Now for TX interrupt coalescing */
if(tx_pkt_rate < 800){
if(coal_conf->tx_coal_type != NO_COALESCE){
coal_conf->tx_timeout = 0x0;
coal_conf->tx_event_count = 0;
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = NO_COALESCE;
}
}
else{
tx_pkt_size = tx_data_rate/tx_pkt_rate;
if (tx_pkt_size < 128){
if(coal_conf->tx_coal_type != NO_COALESCE){
coal_conf->tx_timeout = 0;
coal_conf->tx_event_count = 0;
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = NO_COALESCE;
}
}
else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
if(coal_conf->tx_coal_type != LOW_COALESCE){
coal_conf->tx_timeout = 1;
coal_conf->tx_event_count = 2;
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = LOW_COALESCE;
}
}
else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
coal_conf->tx_timeout = 2;
coal_conf->tx_event_count = 5;
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = MEDIUM_COALESCE;
}
}
else if(tx_pkt_size >= 1024){
if(coal_conf->tx_coal_type != HIGH_COALESCE){
coal_conf->tx_timeout = 4;
coal_conf->tx_event_count = 8;
amd8111e_set_coalesce(dev,TX_INTR_COAL);
coal_conf->tx_coal_type = HIGH_COALESCE;
}
}
}
return 0;
}
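/* Worked example for the thresholds above (numbers are illustrative): with
 * 900 packets and 540000 bytes received in one timer interval,
 * rx_pkt_rate = 900 (>= 800) and rx_pkt_size = 540000/900 = 600, so the
 * 512..1023 branch is taken and MEDIUM_COALESCE (rx_timeout = 1,
 * rx_event_count = 4) is programmed via amd8111e_set_coalesce(dev, RX_INTR_COAL).
 */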
/* /*
This is device interrupt function. It handles transmit, receive and link change interrupts. This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
*/ */
static irqreturn_t static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
struct net_device * dev = (struct net_device *) dev_id; struct net_device * dev = (struct net_device *) dev_id;
struct amd8111e_priv *lp = dev->priv; struct amd8111e_priv *lp = dev->priv;
void * mmio = lp->mmio; void * mmio = lp->mmio;
unsigned int intr0; unsigned int intr0;
int handled = 0; unsigned int handled = 1;
if(dev == NULL) if(dev == NULL)
return IRQ_NONE; return IRQ_NONE;
spin_lock (&lp->lock); if (regs) spin_lock (&lp->lock);
/* disabling interrupt */ /* disabling interrupt */
writel(INTREN, mmio + CMD0); writel(INTREN, mmio + CMD0);
@@ -802,10 +1117,11 @@ amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* Process all the INT event until INTR bit is clear. */ /* Process all the INT event until INTR bit is clear. */
if (!(intr0 & INTR)) if (!(intr0 & INTR)) {
handled = 0;
goto err_no_interrupt; goto err_no_interrupt;
}
handled = 1;
/* Current driver processes 3 interrupts : RINT,TINT,LCINT */ /* Current driver processes 3 interrupts : RINT,TINT,LCINT */
writel(intr0, mmio + INT0); writel(intr0, mmio + INT0);
@@ -822,15 +1138,21 @@ amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* Check if Link Change Interrupt has occurred. */ /* Check if Link Change Interrupt has occurred. */
if (intr0 & LCINT) if (intr0 & LCINT)
amd8111e_link_change(dev); amd8111e_link_change(dev);
/* Check if Hardware Timer Interrupt has occurred. */
if (intr0 & STINT)
amd8111e_calc_coalesce(dev);
err_no_interrupt: err_no_interrupt:
writel( VAL0 | INTREN,mmio + CMD0); writel( VAL0 | INTREN,mmio + CMD0);
spin_unlock(&lp->lock);
if (regs) spin_unlock(&lp->lock);
return IRQ_RETVAL(handled); return IRQ_RETVAL(handled);
} }
/* /*
This function closes the network interface and copies the new set of statistics into the previous statistics structure so that most recent statistics will be available after the interface is down. This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
*/ */
static int amd8111e_close(struct net_device * dev) static int amd8111e_close(struct net_device * dev)
{ {
@@ -845,10 +1167,15 @@ static int amd8111e_close(struct net_device * dev)
netif_carrier_off(lp->amd8111e_net_dev); netif_carrier_off(lp->amd8111e_net_dev);
/* Delete ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE)
del_timer_sync(&lp->ipg_data.ipg_timer);
/* Update the statistics before closing */
amd8111e_get_stats(dev);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
free_irq(dev->irq, dev); free_irq(dev->irq, dev);
memcpy(&lp->prev_stats,amd8111e_get_stats(dev), sizeof(lp->prev_stats));
lp->opened = 0; lp->opened = 0;
return 0; return 0;
} }
@@ -870,7 +1197,12 @@ static int amd8111e_open(struct net_device * dev )
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
return -ENOMEM; return -ENOMEM;
} }
/* Start ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE){
add_timer(&lp->ipg_data.ipg_timer);
printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
}
lp->opened = 1; lp->opened = 1;
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
@@ -908,11 +1240,10 @@ static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len); lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
lp->tx_skbuff[tx_index] = skb; lp->tx_skbuff[tx_index] = skb;
lp->tx_ring[tx_index].tx_dr_offset2 = 0; lp->tx_ring[tx_index].tx_flags = 0;
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){ if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
lp->tx_ring[tx_index].tag_ctrl_cmd |= lp->tx_ring[tx_index].tag_ctrl_cmd |=
cpu_to_le32(TCC_VLAN_INSERT); cpu_to_le32(TCC_VLAN_INSERT);
lp->tx_ring[tx_index].tag_ctrl_info = lp->tx_ring[tx_index].tag_ctrl_info =
@@ -926,7 +1257,7 @@ static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
(u32) cpu_to_le32(lp->tx_dma_addr[tx_index]); (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);
/* Set FCS and LTINT bits */ /* Set FCS and LTINT bits */
lp->tx_ring[tx_index].tx_dr_offset2 |= lp->tx_ring[tx_index].tx_flags |=
cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT); cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
lp->tx_idx++; lp->tx_idx++;
@@ -949,16 +1280,54 @@ This function returns all the memory mapped registers of the device.
static char* amd8111e_read_regs(struct amd8111e_priv* lp) static char* amd8111e_read_regs(struct amd8111e_priv* lp)
{ {
void * mmio = lp->mmio; void * mmio = lp->mmio;
unsigned char * reg_buff; u32 * reg_buff;
int i;
reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL); reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL);
if(NULL == reg_buff) if(NULL == reg_buff)
return NULL; return NULL;
for (i=0; i < AMD8111E_REG_DUMP_LEN; i+=4)
reg_buff[i]= readl(mmio + i); /* Read only necessary registers */
return reg_buff; reg_buff[0] = readl(mmio + XMT_RING_BASE_ADDR0);
reg_buff[1] = readl(mmio + XMT_RING_LEN0);
reg_buff[2] = readl(mmio + RCV_RING_BASE_ADDR0);
reg_buff[3] = readl(mmio + RCV_RING_LEN0);
reg_buff[4] = readl(mmio + CMD0);
reg_buff[5] = readl(mmio + CMD2);
reg_buff[6] = readl(mmio + CMD3);
reg_buff[7] = readl(mmio + CMD7);
reg_buff[8] = readl(mmio + INT0);
reg_buff[9] = readl(mmio + INTEN0);
reg_buff[10] = readl(mmio + LADRF);
reg_buff[11] = readl(mmio + LADRF+4);
reg_buff[12] = readl(mmio + STAT0);
return (char *)reg_buff;
}
/*
amd8111e crc generator implementation is different from the kernel
ether_crc() function.
*/
int amd8111e_ether_crc(int len, char* mac_addr)
{
int i,byte;
unsigned char octet;
u32 crc= INITCRC;
for(byte=0; byte < len; byte++){
octet = mac_addr[byte];
for( i=0;i < 8; i++){
/* If the next bit from the input stream is 1, subtract the divisor (CRC32) from the dividend (crc). */
if( (octet & 0x1) ^ (crc & 0x1) ){
crc >>= 1;
crc ^= CRC32;
}
else
crc >>= 1;
octet >>= 1;
}
}
return crc;
} }
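/* Usage note: amd8111e_set_multicast_list() below uses the top six bits of
 * this CRC, ( crc >> 26 ) & 0x3f, as the bit index into the 64-bit logical
 * address filter (LADRF) that is written through amd8111e_writeq().
 */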
/* /*
This function sets promiscuous mode, all-multi mode or the multicast address
@@ -970,9 +1339,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
struct amd8111e_priv *lp = dev->priv; struct amd8111e_priv *lp = dev->priv;
u32 mc_filter[2] ; u32 mc_filter[2] ;
int i,bit_num; int i,bit_num;
if(dev->flags & IFF_PROMISC){ if(dev->flags & IFF_PROMISC){
printk("%s: Setting promiscuous mode.\n",dev->name); printk(KERN_INFO "%s: Setting promiscuous mode.\n",dev->name);
writel( VAL2 | PROM, lp->mmio + CMD2); writel( VAL2 | PROM, lp->mmio + CMD2);
return; return;
} }
@@ -983,7 +1351,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0xffffffff; mc_filter[1] = mc_filter[0] = 0xffffffff;
lp->mc_list = dev->mc_list; lp->mc_list = dev->mc_list;
lp->options |= OPTION_MULTICAST_ENABLE; lp->options |= OPTION_MULTICAST_ENABLE;
AMD8111E_WRITE_REG64(lp->mmio, LADRF,mc_filter); amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
return; return;
} }
if( dev->mc_count == 0 ){ if( dev->mc_count == 0 ){
@@ -991,7 +1359,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0; mc_filter[1] = mc_filter[0] = 0;
lp->mc_list = 0; lp->mc_list = 0;
lp->options &= ~OPTION_MULTICAST_ENABLE; lp->options &= ~OPTION_MULTICAST_ENABLE;
AMD8111E_WRITE_REG64(lp->mmio, LADRF,mc_filter); amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
/* disable promiscuous mode */
writel(PROM, lp->mmio + CMD2); writel(PROM, lp->mmio + CMD2);
return; return;
@@ -1002,14 +1370,16 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0; mc_filter[1] = mc_filter[0] = 0;
for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count; for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
i++, mc_ptr = mc_ptr->next) { i++, mc_ptr = mc_ptr->next) {
bit_num = ether_crc(ETH_ALEN, mc_ptr->dmi_addr) >> 26; bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr) >> 26 ) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
} }
amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
/* To eliminate PCI posting bug */
readl(lp->mmio + CMD2);
AMD8111E_WRITE_REG64(lp->mmio, LADRF, mc_filter);
return;
} }
/*
This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application.
*/
@@ -1032,6 +1402,7 @@ static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
strcpy (info.driver, MODULE_NAME); strcpy (info.driver, MODULE_NAME);
strcpy (info.version, MODULE_VERSION); strcpy (info.version, MODULE_VERSION);
memset(&info.fw_version, 0, sizeof(info.fw_version)); memset(&info.fw_version, 0, sizeof(info.fw_version));
sprintf(info.fw_version,"%u",chip_version);
strcpy (info.bus_info, pci_dev->slot_name); strcpy (info.bus_info, pci_dev->slot_name);
info.eedump_len = 0; info.eedump_len = 0;
info.regdump_len = AMD8111E_REG_DUMP_LEN; info.regdump_len = AMD8111E_REG_DUMP_LEN;
@@ -1039,85 +1410,27 @@ static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
case ETHTOOL_GSET:{ /* get settings */
struct ethtool_cmd cmd = { ETHTOOL_GSET }; case ETHTOOL_GSET: {
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
if (!lp->opened) spin_lock_irq(&lp->lock);
return -EAGAIN; mii_ethtool_gset(&lp->mii_if, &ecmd);
spin_unlock_irq(&lp->lock);
cmd.supported = SUPPORTED_Autoneg | if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_MII;
cmd.advertising = ADVERTISED_Autoneg |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_MII;
cmd.speed = lp->link_config.speed;
cmd.duplex = lp->link_config.duplex;
cmd.port = 0;
cmd.phy_address = PHY_ID;
cmd.transceiver = XCVR_EXTERNAL;
cmd.autoneg = lp->link_config.autoneg;
cmd.maxtxpkt = 0; /* not implemented interrupt coalasing */
cmd.maxrxpkt = 0; /* not implemented interrupt coalasing */
if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
/* set settings */
case ETHTOOL_SSET: { case ETHTOOL_SSET: {
int r;
struct ethtool_cmd cmd; struct ethtool_cmd ecmd;
if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
if (!lp->opened)
return -EAGAIN;
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT; return -EFAULT;
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
r = mii_ethtool_sset(&lp->mii_if, &ecmd);
if(cmd.autoneg == AUTONEG_ENABLE){
/* keeping the original speeds */
if((lp->link_config.speed != SPEED_INVALID)&&
(lp->link_config.duplex != DUPLEX_INVALID)){
lp->link_config.orig_speed = lp->link_config.speed;
lp->link_config.orig_duplex = lp->link_config.duplex;
lp->link_config.orig_phy_option = lp->ext_phy_option;
}
lp->ext_phy_option = XPHYANE;
}
else if(cmd.speed == SPEED_100 && cmd.duplex == DUPLEX_HALF)
lp->ext_phy_option = XPHYSP;
else if(cmd.speed == SPEED_100 && cmd.duplex == DUPLEX_FULL)
lp->ext_phy_option = XPHYSP |XPHYFD;
else if(cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
lp->ext_phy_option = 0;
else if(cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_FULL)
lp->ext_phy_option = XPHYFD;
else {
/* setting the original speed */
cmd.speed = lp->link_config.orig_speed;
cmd.duplex = lp->link_config.orig_duplex;
lp->ext_phy_option = lp->link_config.orig_phy_option;
}
lp->link_config.autoneg = cmd.autoneg;
if (cmd.autoneg == AUTONEG_ENABLE) {
lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID;
} else {
lp->link_config.speed = cmd.speed;
lp->link_config.duplex = cmd.duplex;
}
amd8111e_set_ext_phy(dev);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
return 0; return r;
} }
case ETHTOOL_GREGS: { case ETHTOOL_GREGS: {
struct ethtool_regs regs; struct ethtool_regs regs;
@@ -1143,24 +1456,17 @@ static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
kfree(regbuf); kfree(regbuf);
return ret; return ret;
} }
/* restart autonegotiation */
case ETHTOOL_NWAY_RST: { case ETHTOOL_NWAY_RST: {
int ret; return mii_nway_restart(&lp->mii_if);
spin_lock_irq(&lp->lock);
if(lp->link_config.autoneg == AUTONEG_ENABLE){
lp->ext_phy_option = XPHYANE;
amd8111e_set_ext_phy(dev);
ret = 0;
}else
ret = -EINVAL;
spin_unlock_irq(&lp->lock);
return ret;
} }
/* get link status */
case ETHTOOL_GLINK: { case ETHTOOL_GLINK: {
struct ethtool_value val = { ETHTOOL_GLINK }; struct ethtool_value val = {ETHTOOL_GLINK};
val.data = mii_link_ok(&lp->mii_if);
val.data = netif_carrier_ok(dev) ? 1 : 0;
if (copy_to_user(useraddr, &val, sizeof(val))) if (copy_to_user(useraddr, &val, sizeof(val)))
return -EFAULT; return -EFAULT;
return 0;
} }
case ETHTOOL_GWOL: { case ETHTOOL_GWOL: {
struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL }; struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL };
@@ -1199,60 +1505,6 @@ static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
} }
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
{
void * mmio = lp->mmio;
unsigned int reg_val;
unsigned int repeat= REPEAT_CNT;
reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE)
reg_val = readl( mmio + PHY_ACCESS );
writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16), mmio +PHY_ACCESS);
do{
reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
if(reg_val & PHY_RD_ERR)
goto err_phy_read;
*val = reg_val & 0xffff;
return 0;
err_phy_read:
*val = 0;
return -EINVAL;
}
static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
{
unsigned int repeat = REPEAT_CNT;
void * mmio = lp->mmio;
unsigned int reg_val;
reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE)
reg_val = readl( mmio + PHY_ACCESS );
writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
do{
reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write the data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
if(reg_val & PHY_RD_ERR)
goto err_phy_write;
return 0;
err_phy_write:
return -EINVAL;
}
static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd) static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
{ {
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data; struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
@@ -1320,15 +1572,10 @@ int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu; dev->mtu = new_mtu;
/* if (new_mtu > ETH_DATA_LEN)
lp->options |= OPTION_JUMBO_ENABLE;
else
lp->options &= ~OPTION_JUMBO_ENABLE;
*/
err = amd8111e_restart(dev); err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
if(!err)
netif_start_queue(dev); netif_start_queue(dev);
return err; return err;
} }
@@ -1354,73 +1601,41 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
{ {
writel( VAL1|MPPLBA, lp->mmio + CMD3); writel( VAL1|MPPLBA, lp->mmio + CMD3);
writel( VAL0|MPEN_SW, lp->mmio + CMD7); writel( VAL0|MPEN_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
readl(lp->mmio + CMD7);
return 0; return 0;
} }
static int amd8111e_enable_link_change(struct amd8111e_priv* lp) static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
{ {
/* Adapter is already stopped/suspended/interrupt-disabled */
writel(VAL0|LCMODE_SW,lp->mmio + CMD7); writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
/* To eliminate PCI posting bug */
readl(lp->mmio + CMD7);
return 0; return 0;
} }
/* This function is called when a packet transmission fails to complete within a reasonable period, on the assumption that an interrupt has been lost or the interface is locked up. This function will reinitialize the hardware */
/* static void amd8111e_tx_timeout(struct net_device *dev)
This function sets the power state of the device. When the device go to lower power states 1,2, and 3 it enables the wake on lan
*/
static int amd8111e_set_power_state(struct amd8111e_priv* lp, u32 state)
{ {
u16 power_control; struct amd8111e_priv* lp = dev->priv;
int pm = lp->pm_cap; int err;
pci_read_config_word(lp->pci_dev,
pm + PCI_PM_CTRL,
&power_control);
power_control |= PCI_PM_CTRL_PME_STATUS;
power_control &= ~(PCI_PM_CTRL_STATE_MASK);
switch (state) {
case 0:
power_control |= 0;
pci_write_config_word(lp->pci_dev,
pm + PCI_PM_CTRL,
power_control);
return 0;
case 1:
power_control |= 1;
break;
case 2:
power_control |= 2;
break;
case 3:
power_control |= 3;
break;
default:
printk(KERN_WARNING "%s: Invalid power state (%d) requested.\n",
lp->amd8111e_net_dev->name, state);
return -EINVAL;
}
if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
amd8111e_enable_magicpkt(lp);
if(lp->options & OPTION_WAKE_PHY_ENABLE)
amd8111e_enable_link_change(lp);
/* Setting new power state. */
pci_write_config_word(lp->pci_dev, pm + PCI_PM_CTRL, power_control);
return 0;
printk(KERN_ERR "%s: transmit timed out, resetting\n",
dev->name);
spin_lock_irq(&lp->lock);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
if(!err)
netif_wake_queue(dev);
} }
static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state) static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
{ {
struct net_device *dev = pci_get_drvdata(pci_dev); struct net_device *dev = pci_get_drvdata(pci_dev);
struct amd8111e_priv *lp = dev->priv; struct amd8111e_priv *lp = dev->priv;
int err;
if (!netif_running(dev)) if (!netif_running(dev))
return 0; return 0;
@@ -1434,37 +1649,54 @@ static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
/* stop chip */ /* stop chip */
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
if(lp->options & OPTION_DYN_IPG_ENABLE)
del_timer_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp); amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
err = amd8111e_set_power_state(lp, state); if(lp->options & OPTION_WOL_ENABLE){
if (err) { /* enable wol */
if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
amd8111e_enable_magicpkt(lp);
if(lp->options & OPTION_WAKE_PHY_ENABLE)
amd8111e_enable_link_change(lp);
spin_lock_irq(&lp->lock); pci_enable_wake(pci_dev, 3, 1);
amd8111e_restart(dev); pci_enable_wake(pci_dev, 4, 1); /* D3 cold */
spin_unlock_irq(&lp->lock);
netif_device_attach(dev);
} }
return err; else{
pci_enable_wake(pci_dev, 3, 0);
pci_enable_wake(pci_dev, 4, 0); /* 4 == D3 cold */
}
pci_save_state(pci_dev, lp->pm_state);
pci_set_power_state(pci_dev, 3);
return 0;
} }
static int amd8111e_resume(struct pci_dev *pci_dev) static int amd8111e_resume(struct pci_dev *pci_dev)
{ {
struct net_device *dev = pci_get_drvdata(pci_dev); struct net_device *dev = pci_get_drvdata(pci_dev);
struct amd8111e_priv *lp = dev->priv; struct amd8111e_priv *lp = dev->priv;
int err;
if (!netif_running(dev)) if (!netif_running(dev))
return 0; return 0;
err = amd8111e_set_power_state(lp, 0); pci_set_power_state(pci_dev, 0);
if (err) pci_restore_state(pci_dev, lp->pm_state);
return err;
pci_enable_wake(pci_dev, 3, 0);
pci_enable_wake(pci_dev, 4, 0); /* D3 cold */
netif_device_attach(dev); netif_device_attach(dev);
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
amd8111e_restart(dev); amd8111e_restart(dev);
/* Restart ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE)
mod_timer(&lp->ipg_data.ipg_timer,
jiffies + (IPG_CONVERGE_TIME * HZ));
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
return 0; return 0;
@@ -1483,6 +1715,65 @@ static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
} }
} }
static void amd8111e_config_ipg(struct net_device* dev)
{
struct amd8111e_priv *lp = dev->priv;
struct ipg_info* ipg_data = &lp->ipg_data;
void * mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
unsigned int total_col_cnt;
unsigned int tmp_ipg;
if(lp->link_config.duplex == DUPLEX_FULL){
ipg_data->ipg = DEFAULT_IPG;
return;
}
if(ipg_data->ipg_state == SSTATE){
if(ipg_data->timer_tick == IPG_STABLE_TIME){
ipg_data->timer_tick = 0;
ipg_data->ipg = MIN_IPG - IPG_STEP;
ipg_data->current_ipg = MIN_IPG;
ipg_data->diff_col_cnt = 0xFFFFFFFF;
ipg_data->ipg_state = CSTATE;
}
else
ipg_data->timer_tick++;
}
if(ipg_data->ipg_state == CSTATE){
/* Get the current collision count */
total_col_cnt = ipg_data->col_cnt =
amd8111e_read_mib(mmio, xmt_collisions);
if ((total_col_cnt - prev_col_cnt) <
(ipg_data->diff_col_cnt)){
ipg_data->diff_col_cnt =
total_col_cnt - prev_col_cnt ;
ipg_data->ipg = ipg_data->current_ipg;
}
ipg_data->current_ipg += IPG_STEP;
if (ipg_data->current_ipg <= MAX_IPG)
tmp_ipg = ipg_data->current_ipg;
else{
tmp_ipg = ipg_data->ipg;
ipg_data->ipg_state = SSTATE;
}
writew((u32)tmp_ipg, mmio + IPG);
writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
}
mod_timer(&lp->ipg_data.ipg_timer, jiffies + (IPG_CONVERGE_TIME * HZ));
return;
}
static int __devinit amd8111e_probe_one(struct pci_dev *pdev, static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
const struct pci_device_id *ent) const struct pci_device_id *ent)
...@@ -1491,7 +1782,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1491,7 +1782,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
unsigned long reg_addr,reg_len; unsigned long reg_addr,reg_len;
struct amd8111e_priv* lp; struct amd8111e_priv* lp;
struct net_device* dev; struct net_device* dev;
unsigned int chip_version;
err = pci_enable_device(pdev); err = pci_enable_device(pdev);
if(err){ if(err){
...@@ -1542,7 +1832,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1542,7 +1832,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
} }
SET_MODULE_OWNER(dev); SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ; dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
...@@ -1551,11 +1840,16 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1551,11 +1840,16 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
#endif #endif
lp = dev->priv; lp = dev->priv;
memset (lp, 0, sizeof (*lp));
lp->pci_dev = pdev; lp->pci_dev = pdev;
lp->amd8111e_net_dev = dev; lp->amd8111e_net_dev = dev;
lp->pm_cap = pm_cap; lp->pm_cap = pm_cap;
/* setting mii default values */
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = amd8111e_mdio_read;
lp->mii_if.mdio_write = amd8111e_mdio_write;
lp->mii_if.phy_id = PHY_ID;
spin_lock_init(&lp->lock); spin_lock_init(&lp->lock);
lp->mmio = ioremap(reg_addr, reg_len); lp->mmio = ioremap(reg_addr, reg_len);
...@@ -1569,12 +1863,14 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1569,12 +1863,14 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
/* Initializing MAC address */ /* Initializing MAC address */
for(i = 0; i < ETH_ADDR_LEN; i++) for(i = 0; i < ETH_ADDR_LEN; i++)
dev->dev_addr[i] =readb(lp->mmio + PADR + i); dev->dev_addr[i] =readb(lp->mmio + PADR + i);
/* Setting user defined speed */
if (speed_duplex[card_idx] > sizeof(speed_duplex_mapping)) /* Setting user defined parameters */
lp->ext_phy_option = XPHYANE; lp->ext_phy_option = speed_duplex[card_idx];
else if(coalesce[card_idx])
lp->ext_phy_option = lp->options |= OPTION_INTR_COAL_ENABLE;
speed_duplex_mapping[speed_duplex[card_idx]]; if(dynamic_ipg[card_idx++])
lp->options |= OPTION_DYN_IPG_ENABLE;
/* Initialize driver entry points */ /* Initialize driver entry points */
dev->open = amd8111e_open; dev->open = amd8111e_open;
dev->hard_start_xmit = amd8111e_start_xmit; dev->hard_start_xmit = amd8111e_start_xmit;
...@@ -1584,6 +1880,8 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1584,6 +1880,8 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
dev->do_ioctl = amd8111e_ioctl; dev->do_ioctl = amd8111e_ioctl;
dev->change_mtu = amd8111e_change_mtu; dev->change_mtu = amd8111e_change_mtu;
dev->irq =pdev->irq; dev->irq =pdev->irq;
dev->tx_timeout = amd8111e_tx_timeout;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
...@@ -1593,10 +1891,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1593,10 +1891,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
/* Set receive buffer length and set jumbo option*/ /* Set receive buffer length and set jumbo option*/
amd8111e_set_rx_buff_len(dev); amd8111e_set_rx_buff_len(dev);
/* dev->tx_timeout = tg3_tx_timeout; */
/* dev->watchdog_timeo = TG3_TX_TIMEOUT; */
err = register_netdev(dev); err = register_netdev(dev);
if (err) { if (err) {
...@@ -1607,15 +1901,26 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1607,15 +1901,26 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev); pci_set_drvdata(pdev, dev);
/* Initialize software ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE){
init_timer(&lp->ipg_data.ipg_timer);
lp->ipg_data.ipg_timer.data = (unsigned long) dev;
lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
lp->ipg_data.ipg_timer.expires = jiffies +
IPG_CONVERGE_TIME * HZ;
lp->ipg_data.ipg = DEFAULT_IPG;
lp->ipg_data.ipg_state = CSTATE;
};
/* display driver and device information */ /* display driver and device information */
chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
printk("%s: AMD-8111e Driver Version: %s\n",dev->name,MODULE_VERSION); printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERSION);
printk("%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version); printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
printk("%2.2x%c", dev->dev_addr[i],i == 5 ? ' ' : ':'); printk("%2.2x%c",dev->dev_addr[i],i == 5 ? ' ' : ':');
printk("\n"); printk( "\n");
return 0; return 0;
err_iounmap: err_iounmap:
iounmap((void *) lp->mmio); iounmap((void *) lp->mmio);
......
/* /*
* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
* Copyright (C) 2003 Advanced Micro Devices
*
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by * it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or * the Free Software Foundation; either version 2 of the License, or
...@@ -27,73 +30,14 @@ Module Name: ...@@ -27,73 +30,14 @@ Module Name:
Kernel Mode Kernel Mode
Revision History: Revision History:
3.0.0
Initial Revision.
3.0.1
*/ */
#ifndef _AMD811E_H #ifndef _AMD811E_H
#define _AMD811E_H #define _AMD811E_H
/* Hardware definitions */
#define B31_MASK 0x80000000
#define B30_MASK 0X40000000
#define B29_MASK 0x20000000
#define B28_MASK 0x10000000
#define B27_MASK 0x08000000
#define B26_MASK 0x04000000
#define B25_MASK 0x02000000
#define B24_MASK 0x01000000
#define B23_MASK 0x00800000
#define B22_MASK 0x00400000
#define B21_MASK 0x00200000
#define B20_MASK 0x00100000
#define B19_MASK 0x00080000
#define B18_MASK 0x00040000
#define B17_MASK 0x00020000
#define B16_MASK 0x00010000
#define B15_MASK 0x8000
#define B14_MASK 0x4000
#define B13_MASK 0x2000
#define B12_MASK 0x1000
#define B11_MASK 0x0800
#define B10_MASK 0x0400
#define B9_MASK 0x0200
#define B8_MASK 0x0100
#define B7_MASK 0x0080
#define B6_MASK 0x0040
#define B5_MASK 0x0020
#define B4_MASK 0x0010
#define B3_MASK 0x0008
#define B2_MASK 0x0004
#define B1_MASK 0x0002
#define B0_MASK 0x0001
/* PCI register offset */
#define PCI_ID_REG 0x00
#define PCI_COMMAND_REG 0x04
/* #define MEMEN_BIT B1_MASK */
/* #define IOEN_BIT B0_MASK */
#define PCI_REV_ID_REG 0x08
#define PCI_MEM_BASE_REG 0x10
/* #define MEMBASE_MASK 0xFFFFF000 */
/* #define MEMBASE_SIZE 4096 */
#define PCI_INTR_REG 0x3C
#define PCI_STATUS_REG 0x06
#define PCI_CAP_ID_REG_OFFSET 0x34
#define PCI_PMC_REG_OFFSET 0x36
#define PCI_PMCSR_REG_OFFSET 0x38
/* #define NEW_CAP 0x0010 */
#define PME_EN 0x0100
#define PARTID_MASK 0xFFFFF000
#define PARTID_START_BIT 12
/* #define LANCE_DWIO_RESET_PORT 0x18
#define LANCE_WIO_RESET_PORT 0x14 */
#define MIB_OFFSET 0x28
/* Command style register access
Registers CMD0, CMD2, CMD3, CMD7 and INTEN0 use a write access technique called command style access. It allows writing to selected bits of these registers without altering the bits that are not selected. A command style register is divided into 4 bytes that can be written independently. The higher-order bit of each byte is the value bit that specifies the value that will be written into the selected bits of the register.
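As a minimal illustration (not part of the driver; it assumes a mapped register base `mmio` and the CMD0, VAL0, RUN and INTREN definitions found later in this header), a command style write touching only bits 0 and 1 of the least significant byte would look like:

	writel(VAL0 | INTREN | RUN, mmio + CMD0);	/* set INTREN and RUN, leave every other bit alone */
	writel(INTREN | RUN, mmio + CMD0);		/* VAL0 clear: the same two bits are cleared instead */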
...@@ -155,7 +99,7 @@ eg., if the value 10011010b is written into the least significant byte of a comm ...@@ -155,7 +99,7 @@ eg., if the value 10011010b is written into the least significant byte of a comm
#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */ #define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */
#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */ #define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */
#define RCV_RING_LEN0 0x150 /* Transmit Ring0 length register */ #define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */
#define SRAM_SIZE 0x178 /* SRAM size register */ #define SRAM_SIZE 0x178 /* SRAM size register */
#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */ #define SRAM_BOUNDARY 0x17A /* SRAM boundary register */
...@@ -164,391 +108,398 @@ eg., if the value 10011010b is written into the least significant byte of a comm ...@@ -164,391 +108,398 @@ eg., if the value 10011010b is written into the least significant byte of a comm
#define PADR 0x160 /* Physical address register */ #define PADR 0x160 /* Physical address register */
#define IFS1 0x18C /* Inter-frame spacing Part1 register */
#define IFS 0x18D /* Inter-frame spacing register */
#define IPG 0x18E /* Inter-frame gap register */
/* 64bit register */ /* 64bit register */
#define LADRF 0x168 /* Logical address filter register */ #define LADRF 0x168 /* Logical address filter register */
/* 8bit registers */
#define IFS1 0x18C /* Inter-frame spacing Part1 register */
#define IFS 0x18D /* Inter-frame spacing register */
/* Register Bit Definitions */ /* Register Bit Definitions */
typedef enum {
ASF_INIT_DONE = (1 << 1),
ASF_INIT_PRESENT = (1 << 0),
}STAT_ASF_BITS;
typedef enum {
MIB_CMD_ACTIVE = (1 << 15 ),
MIB_RD_CMD = (1 << 13 ),
MIB_CLEAR = (1 << 12 ),
MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)|
(1 << 4) | (1 << 5),
}MIB_ADDR_BITS;
typedef enum {
PMAT_DET = (1 << 12),
MP_DET = (1 << 11),
LC_DET = (1 << 10),
SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7),
FULL_DPLX = (1 << 6),
LINK_STATS = (1 << 5),
AUTONEG_COMPLETE = (1 << 4),
MIIPD = (1 << 3),
RX_SUSPENDED = (1 << 2),
TX_SUSPENDED = (1 << 1),
RUNNING = (1 << 0),
}STAT0_BITS;
/* STAT_ASF 0x00, 32bit register */
#define ASF_INIT_DONE B1_MASK
#define ASF_INIT_PRESENT B0_MASK
/* MIB_ADDR 0x14, 16bit register */
#define MIB_CMD_ACTIVE B15_MASK
#define MIB_RD_CMD B13_MASK
#define MIB_CLEAR B12_MASK
#define MIB_ADDRESS 0x0000003F /* 5:0 */
/* QOS_ADDR 0x1C, 16bit register */
#define QOS_CMD_ACTIVE B15_MASK
#define QOS_WR_CMD B14_MASK
#define QOS_RD_CMD B13_MASK
#define QOS_ADDRESS 0x0000001F /* 4:0 */
/* STAT0 0x30, 32bit register */
#define PAUSE_PEND B14_MASK
#define PAUSING B13_MASK
#define PMAT_DET B12_MASK
#define MP_DET B11_MASK
#define LC_DET B10_MASK
#define SPEED_MASK 0x0380 /* 9:7 */
#define FULL_DPLX B6_MASK
#define LINK_STATS B5_MASK
#define AUTONEG_COMPLETE B4_MASK
#define MIIPD B3_MASK
#define RX_SUSPENDED B2_MASK
#define TX_SUSPENDED B1_MASK
#define RUNNING B0_MASK
#define PHY_SPEED_10 0x2 #define PHY_SPEED_10 0x2
#define PHY_SPEED_100 0x3 #define PHY_SPEED_100 0x3
/* INT0 0x38, 32bit register */ /* INT0 0x38, 32bit register */
#define INTR B31_MASK typedef enum {
#define PCSINT B28_MASK
#define LCINT B27_MASK INTR = (1 << 31),
#define APINT5 B26_MASK PCSINT = (1 << 28),
#define APINT4 B25_MASK LCINT = (1 << 27),
#define APINT3 B24_MASK APINT5 = (1 << 26),
#define TINT_SUM B23_MASK APINT4 = (1 << 25),
#define APINT2 B22_MASK APINT3 = (1 << 24),
#define APINT1 B21_MASK TINT_SUM = (1 << 23),
#define APINT0 B20_MASK APINT2 = (1 << 22),
#define MIIPDTINT B19_MASK APINT1 = (1 << 21),
#define MCCIINT B18_MASK APINT0 = (1 << 20),
#define MCCINT B17_MASK MIIPDTINT = (1 << 19),
#define MREINT B16_MASK MCCINT = (1 << 17),
#define RINT_SUM B15_MASK MREINT = (1 << 16),
#define SPNDINT B14_MASK RINT_SUM = (1 << 15),
#define MPINT B13_MASK SPNDINT = (1 << 14),
#define SINT B12_MASK MPINT = (1 << 13),
#define TINT3 B11_MASK SINT = (1 << 12),
#define TINT2 B10_MASK TINT3 = (1 << 11),
#define TINT1 B9_MASK TINT2 = (1 << 10),
#define TINT0 B8_MASK TINT1 = (1 << 9),
#define UINT B7_MASK TINT0 = (1 << 8),
#define STINT B4_MASK UINT = (1 << 7),
#define RINT3 B3_MASK STINT = (1 << 4),
#define RINT2 B2_MASK RINT0 = (1 << 0),
#define RINT1 B1_MASK
#define RINT0 B0_MASK }INT0_BITS;
/* INTEN0 0x40, 32bit register */ typedef enum {
#define VAL3 B31_MASK /* VAL bit for byte 3 */
#define VAL2 B23_MASK /* VAL bit for byte 2 */ VAL3 = (1 << 31), /* VAL bit for byte 3 */
#define VAL1 B15_MASK /* VAL bit for byte 1 */ VAL2 = (1 << 23), /* VAL bit for byte 2 */
#define VAL0 B7_MASK /* VAL bit for byte 0 */ VAL1 = (1 << 15), /* VAL bit for byte 1 */
/* VAL3 */ VAL0 = (1 << 7), /* VAL bit for byte 0 */
#define PSCINTEN B28_MASK
#define LCINTEN B27_MASK }VAL_BITS;
#define APINT5EN B26_MASK
#define APINT4EN B25_MASK typedef enum {
#define APINT3EN B24_MASK
/* VAL2 */ /* VAL3 */
#define APINT2EN B22_MASK LCINTEN = (1 << 27),
#define APINT1EN B21_MASK APINT5EN = (1 << 26),
#define APINT0EN B20_MASK APINT4EN = (1 << 25),
#define MIIPDTINTEN B19_MASK APINT3EN = (1 << 24),
#define MCCIINTEN B18_MASK /* VAL2 */
#define MCCINTEN B17_MASK APINT2EN = (1 << 22),
#define MREINTEN B16_MASK APINT1EN = (1 << 21),
/* VAL1 */ APINT0EN = (1 << 20),
#define SPNDINTEN B14_MASK MIIPDTINTEN = (1 << 19),
#define MPINTEN B13_MASK MCCIINTEN = (1 << 18),
#define SINTEN B12_MASK MCCINTEN = (1 << 17),
#define TINTEN3 B11_MASK MREINTEN = (1 << 16),
#define TINTEN2 B10_MASK /* VAL1 */
#define TINTEN1 B9_MASK SPNDINTEN = (1 << 14),
#define TINTEN0 B8_MASK MPINTEN = (1 << 13),
/* VAL0 */ TINTEN3 = (1 << 11),
#define STINTEN B4_MASK SINTEN = (1 << 12),
#define RINTEN3 B3_MASK TINTEN2 = (1 << 10),
#define RINTEN2 B2_MASK TINTEN1 = (1 << 9),
#define RINTEN1 B1_MASK TINTEN0 = (1 << 8),
#define RINTEN0 B0_MASK /* VAL0 */
STINTEN = (1 << 4),
#define INTEN0_CLEAR 0x1F7F7F1F /* Command style register */ RINTEN0 = (1 << 0),
/* CMD0 0x48, 32bit register */ INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */
/* VAL2 */
#define RDMD3 B19_MASK }INTEN0_BITS;
#define RDMD2 B18_MASK
#define RDMD1 B17_MASK typedef enum {
#define RDMD0 B16_MASK /* VAL2 */
/* VAL1 */ RDMD0 = (1 << 16),
#define TDMD3 B11_MASK /* VAL1 */
#define TDMD2 B10_MASK TDMD3 = (1 << 11),
#define TDMD1 B9_MASK TDMD2 = (1 << 10),
#define TDMD0 B8_MASK TDMD1 = (1 << 9),
/* VAL0 */ TDMD0 = (1 << 8),
#define UINTCMD B6_MASK /* VAL0 */
#define RX_FAST_SPND B5_MASK UINTCMD = (1 << 6),
#define TX_FAST_SPND B4_MASK RX_FAST_SPND = (1 << 5),
#define RX_SPND B3_MASK TX_FAST_SPND = (1 << 4),
#define TX_SPND B2_MASK RX_SPND = (1 << 3),
#define INTREN B1_MASK TX_SPND = (1 << 2),
#define RUN B0_MASK INTREN = (1 << 1),
RUN = (1 << 0),
#define CMD0_CLEAR 0x000F0F7F /* Command style register */
CMD0_CLEAR = 0x000F0F7F, /* Command style register */
/* CMD2 0x50, 32bit register */
/* VAL3 */ }CMD0_BITS;
#define CONDUIT_MODE B29_MASK
/* VAL2 */ typedef enum {
#define RPA B19_MASK
#define DRCVPA B18_MASK /* VAL3 */
#define DRCVBC B17_MASK CONDUIT_MODE = (1 << 29),
#define PROM B16_MASK /* VAL2 */
/* VAL1 */ RPA = (1 << 19),
#define ASTRP_RCV B13_MASK DRCVPA = (1 << 18),
#define FCOLL B12_MASK DRCVBC = (1 << 17),
#define EMBA B11_MASK PROM = (1 << 16),
#define DXMT2PD B10_MASK /* VAL1 */
#define LTINTEN B9_MASK ASTRP_RCV = (1 << 13),
#define DXMTFCS B8_MASK RCV_DROP0 = (1 << 12),
/* VAL0 */ EMBA = (1 << 11),
#define APAD_XMT B6_MASK DXMT2PD = (1 << 10),
#define DRTY B5_MASK LTINTEN = (1 << 9),
#define INLOOP B4_MASK DXMTFCS = (1 << 8),
#define EXLOOP B3_MASK /* VAL0 */
#define REX_RTRY B2_MASK APAD_XMT = (1 << 6),
#define REX_UFLO B1_MASK DRTY = (1 << 5),
#define REX_LCOL B0_MASK INLOOP = (1 << 4),
EXLOOP = (1 << 3),
#define CMD2_CLEAR 0x3F7F3F7F /* Command style register */ REX_RTRY = (1 << 2),
REX_UFLO = (1 << 1),
/* CMD3 0x54, 32bit register */ REX_LCOL = (1 << 0),
/* VAL3 */
#define ASF_INIT_DONE_ALIAS B29_MASK CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */
/* VAL2 */
#define JUMBO B21_MASK }CMD2_BITS;
#define VSIZE B20_MASK
#define VLONLY B19_MASK typedef enum {
#define VL_TAG_DEL B18_MASK
/* VAL1 */ /* VAL3 */
#define EN_PMGR B14_MASK ASF_INIT_DONE_ALIAS = (1 << 29),
#define INTLEVEL B13_MASK /* VAL2 */
#define FORCE_FULL_DUPLEX B12_MASK JUMBO = (1 << 21),
#define FORCE_LINK_STATUS B11_MASK VSIZE = (1 << 20),
#define APEP B10_MASK VLONLY = (1 << 19),
#define MPPLBA B9_MASK VL_TAG_DEL = (1 << 18),
/* VAL0 */ /* VAL1 */
#define RESET_PHY_PULSE B2_MASK EN_PMGR = (1 << 14),
#define RESET_PHY B1_MASK INTLEVEL = (1 << 13),
#define PHY_RST_POL B0_MASK FORCE_FULL_DUPLEX = (1 << 12),
/* CMD7 0x64, 32bit register */ FORCE_LINK_STATUS = (1 << 11),
/* VAL0 */ APEP = (1 << 10),
#define PMAT_SAVE_MATCH B4_MASK MPPLBA = (1 << 9),
#define PMAT_MODE B3_MASK /* VAL0 */
#define MPEN_SW B1_MASK RESET_PHY_PULSE = (1 << 2),
#define LCMODE_SW B0_MASK RESET_PHY = (1 << 1),
PHY_RST_POL = (1 << 0),
#define CMD7_CLEAR 0x0000001B /* Command style register */
/* CTRL0 0x68, 32bit register */ }CMD3_BITS;
#define PHY_SEL 0x03000000 /* 25:24 */
#define RESET_PHY_WIDTH 0x00FF0000 /* 23:16 */
#define BSWP_REGS B10_MASK typedef enum {
#define BSWP_DESC B9_MASK
#define BSWP_DATA B8_MASK /* VAL0 */
#define CACHE_ALIGN B4_MASK PMAT_SAVE_MATCH = (1 << 4),
#define BURST_LIMIT 0x0000000F /* 3:0 */ PMAT_MODE = (1 << 3),
MPEN_SW = (1 << 1),
/* CTRL1 0x6C, 32bit register */ LCMODE_SW = (1 << 0),
#define SLOTMOD_MASK 0x03000000 /* 25:24 */
#define XMTSP_MASK 0x300 /* 17:16 */ CMD7_CLEAR = 0x0000001B /* Command style register */
#define XMTSP_128 0x200
#define XMTSP_64 0x100 }CMD7_BITS;
#define CRTL1_DEFAULT 0x00000017
/* CTRL2 0x70, 32bit register */ typedef enum {
#define FS_MASK 0x00070000 /* 18:16 */
#define FMDC_MASK 0x00000300 /* 9:8 */ RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */
#define XPHYRST B7_MASK XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */
#define XPHYANE B6_MASK XMTSP_128 = (1 << 9), /* 9 */
#define XPHYFD B5_MASK XMTSP_64 = (1 << 8),
#define XPHYSP B3_MASK /* 4:3 */ CACHE_ALIGN = (1 << 4),
#define APDW_MASK 0x00000007 /* 2:0 */ BURST_LIMIT_MASK = (0xF << 0 ),
CTRL1_DEFAULT = 0x00010111,
/* RCV_RING_CFG 0x78, 16bit register */
#define RCV_DROP3 B11_MASK }CTRL1_BITS;
#define RCV_DROP2 B10_MASK
#define RCV_DROP1 B9_MASK typedef enum {
#define RCV_DROP0 B8_MASK
#define RCV_RING_DEFAULT 0x0030 /* 5:4 */ FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */
#define RCV_RING3_EN B3_MASK XPHYRST = (1 << 7),
#define RCV_RING2_EN B2_MASK XPHYANE = (1 << 6),
#define RCV_RING1_EN B1_MASK XPHYFD = (1 << 5),
#define RCV_RING0_EN B0_MASK XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */
APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */
}CTRL2_BITS;
/* XMT_RING_LIMIT 0x7C, 32bit register */ /* XMT_RING_LIMIT 0x7C, 32bit register */
#define XMT_RING2_LIMIT 0x00FF0000 /* 23:16 */ typedef enum {
#define XMT_RING1_LIMIT 0x0000FF00 /* 15:8 */
#define XMT_RING0_LIMIT 0x000000FF /* 7:0 */ XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */
XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */
XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */
}XMT_RING_LIMIT_BITS;
typedef enum {
/* AUTOPOLL0 0x88, 16bit register */ AP_REG0_EN = (1 << 15),
#define AP_REG0_EN B15_MASK AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
#define AP_REG0_ADDR_MASK 0x1F00 /* 12:8 */ AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
#define AP_PHY0_ADDR_MASK 0x001F /* 4:0 */
}AUTOPOLL0_BITS;
/* AUTOPOLL1 0x8A, 16bit register */ /* AUTOPOLL1 0x8A, 16bit register */
#define AP_REG1_EN B15_MASK typedef enum {
#define AP_REG1_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PRE_SUP1 B6_MASK AP_REG1_EN = (1 << 15),
#define AP_PHY1_DFLT B5_MASK AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
#define AP_PHY1_ADDR_MASK 0x001F /* 4:0 */ AP_PRE_SUP1 = (1 << 6),
AP_PHY1_DFLT = (1 << 5),
/* AUTOPOLL2 0x8C, 16bit register */ AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
#define AP_REG2_EN B15_MASK
#define AP_REG2_ADDR_MASK 0x1F00 /* 12:8 */ }AUTOPOLL1_BITS;
#define AP_PRE_SUP2 B6_MASK
#define AP_PHY2_DFLT B5_MASK
#define AP_PHY2_ADDR_MASK 0x001F /* 4:0 */ typedef enum {
/* AUTOPOLL3 0x8E, 16bit register */ AP_REG2_EN = (1 << 15),
#define AP_REG3_EN B15_MASK AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
#define AP_REG3_ADDR_MASK 0x1F00 /* 12:8 */ AP_PRE_SUP2 = (1 << 6),
#define AP_PRE_SUP3 B6_MASK AP_PHY2_DFLT = (1 << 5),
#define AP_PHY3_DFLT B5_MASK AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
#define AP_PHY3_ADDR_MASK 0x001F /* 4:0 */
}AUTOPOLL2_BITS;
/* AUTOPOLL4 0x90, 16bit register */
#define AP_REG4_EN B15_MASK typedef enum {
#define AP_REG4_ADDR_MASK 0x1F00 /* 12:8 */
#define AP_PRE_SUP4 B6_MASK AP_REG3_EN = (1 << 15),
#define AP_PHY4_DFLT B5_MASK AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
#define AP_PHY4_ADDR_MASK 0x001F /* 4:0 */ AP_PRE_SUP3 = (1 << 6),
AP_PHY3_DFLT = (1 << 5),
/* AUTOPOLL5 0x92, 16bit register */ AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
#define AP_REG5_EN B15_MASK
#define AP_REG5_ADDR_MASK 0x1F00 /* 12:8 */ }AUTOPOLL3_BITS;
#define AP_PRE_SUP5 B6_MASK
#define AP_PHY5_DFLT B5_MASK
#define AP_PHY5_ADDR_MASK 0x001F /* 4:0 */ typedef enum {
AP_REG4_EN = (1 << 15),
AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
AP_PRE_SUP4 = (1 << 6),
AP_PHY4_DFLT = (1 << 5),
AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
}AUTOPOLL4_BITS;
typedef enum {
AP_REG5_EN = (1 << 15),
AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
AP_PRE_SUP5 = (1 << 6),
AP_PHY5_DFLT = (1 << 5),
AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
}AUTOPOLL5_BITS;
/* AP_VALUE 0x98, 32bit register */ typedef enum {
#define AP_VAL_ACTIVE B31_MASK typedef enum {
#define AP_VAL_RD_CMD B29_MASK
#define AP_ADDR 0x00070000 /* 18:16 */ AP_VAL_ACTIVE = (1 << 31),
#define AP_VAL 0x0000FFFF /* 15:0 */ AP_VAL_RD_CMD = ( 1 << 29),
AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */
/* PCS_ANEG 0x9C, 32bit register */ AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) |
#define SYNC_LOST B10_MASK (0xF << 12), /* 15:0 */
#define IMATCH B9_MASK
#define CMATCH B8_MASK }AP_VALUE_BITS;
#define PCS_AN_IDLE B1_MASK
#define PCS_AN_CFG B0_MASK typedef enum {
/* DLY_INT_A 0xA8, 32bit register */ DLY_INT_A_R3 = (1 << 31),
#define DLY_INT_A_R3 B31_MASK DLY_INT_A_R2 = (1 << 30),
#define DLY_INT_A_R2 B30_MASK DLY_INT_A_R1 = (1 << 29),
#define DLY_INT_A_R1 B29_MASK DLY_INT_A_R0 = (1 << 28),
#define DLY_INT_A_R0 B28_MASK DLY_INT_A_T3 = (1 << 27),
#define DLY_INT_A_T3 B27_MASK DLY_INT_A_T2 = (1 << 26),
#define DLY_INT_A_T2 B26_MASK DLY_INT_A_T1 = (1 << 25),
#define DLY_INT_A_T1 B25_MASK DLY_INT_A_T0 = ( 1 << 24),
#define DLY_INT_A_T0 B24_MASK EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */
#define EVENT_COUNT_A 0x00FF0000 /* 20:16 */ MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)|
#define MAX_DELAY_TIME_A 0x000007FF /* 10:0 */ (1 << 9) | (1 << 10), /* 10:0 */
/* DLY_INT_B 0xAC, 32bit register */ }DLY_INT_A_BITS;
#define DLY_INT_B_R3 B31_MASK
#define DLY_INT_B_R2 B30_MASK typedef enum {
#define DLY_INT_B_R1 B29_MASK
#define DLY_INT_B_R0 B28_MASK DLY_INT_B_R3 = (1 << 31),
#define DLY_INT_B_T3 B27_MASK DLY_INT_B_R2 = (1 << 30),
#define DLY_INT_B_T2 B26_MASK DLY_INT_B_R1 = (1 << 29),
#define DLY_INT_B_T1 B25_MASK DLY_INT_B_R0 = (1 << 28),
#define DLY_INT_B_T0 B24_MASK DLY_INT_B_T3 = (1 << 27),
#define EVENT_COUNT_B 0x00FF0000 /* 20:16 */ DLY_INT_B_T2 = (1 << 26),
#define MAX_DELAY_TIME_B 0x000007FF /* 10:0 */ DLY_INT_B_T1 = (1 << 25),
DLY_INT_B_T0 = ( 1 << 24),
/* DFC_THRESH2 0xC0, 16bit register */ EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */
#define DFC_THRESH2_HIGH 0xFF00 /* 15:8 */ MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)|
#define DFC_THRESH2_LOW 0x00FF /* 7:0 */ (1 << 9) | (1 << 10), /* 10:0 */
}DLY_INT_B_BITS;
/* DFC_THRESH3 0xC2, 16bit register */
#define DFC_THRESH3_HIGH 0xFF00 /* 15:8 */
#define DFC_THRESH3_LOW 0x00FF /* 7:0 */
/* DFC_THRESH0 0xC4, 16bit register */
#define DFC_THRESH0_HIGH 0xFF00 /* 15:8 */
#define DFC_THRESH0_LOW 0x00FF /* 7:0 */
/* DFC_THRESH1 0xC6, 16bit register */
#define DFC_THRESH1_HIGH 0xFF00 /* 15:8 */
#define DFC_THRESH1_LOW 0x00FF /* 7:0 */
/* FLOW_CONTROL 0xC8, 32bit register */ /* FLOW_CONTROL 0xC8, 32bit register */
#define PAUSE_LEN_CHG B30_MASK typedef enum {
#define FFC_EN B28_MASK
#define DFC_RING3_EN B27_MASK PAUSE_LEN_CHG = (1 << 30),
#define DFC_RING2_EN B26_MASK FTPE = (1 << 22),
#define DFC_RING1_EN B25_MASK FRPE = (1 << 21),
#define DFC_RING0_EN B24_MASK NAPA = (1 << 20),
#define FIXP_CONGEST B21_MASK NPA = (1 << 19),
#define FPA B20_MASK FIXP = ( 1 << 18),
#define NPA B19_MASK FCCMD = ( 1 << 16),
#define FIXP B18_MASK PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */
#define FCPEN B17_MASK
#define FCCMD B16_MASK }FLOW_CONTROL_BITS;
#define PAUSE_LEN 0x0000FFFF /* 15:0 */
/* FFC THRESH 0xCC, 32bit register */
#define FFC_HIGH 0xFFFF0000 /* 31:16 */
#define FFC_LOW 0x0000FFFF /* 15:0 */
/* PHY_ ACCESS 0xD0, 32bit register */ /* PHY_ ACCESS 0xD0, 32bit register */
#define PHY_CMD_ACTIVE B31_MASK typedef enum {
#define PHY_WR_CMD B30_MASK
#define PHY_RD_CMD B29_MASK PHY_CMD_ACTIVE = (1 << 31),
#define PHY_RD_ERR B28_MASK PHY_WR_CMD = (1 << 30),
#define PHY_PRE_SUP B27_MASK PHY_RD_CMD = (1 << 29),
#define PHY_ADDR 0x03E00000 /* 25:21 */ PHY_RD_ERR = (1 << 28),
#define PHY_REG_ADDR 0x001F0000 /* 20:16 */ PHY_PRE_SUP = (1 << 27),
#define PHY_DATA 0x0000FFFF /* 15:0 */ PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)|
(1 << 24) |(1 << 25),/* 25:21 */
/* LED0..3 0xE0..0xE6, 16bit register */ PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */
#define LEDOUT B15_MASK PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)|
#define LEDPOL B14_MASK (0xF << 12),/* 15:0 */
#define LEDDIS B13_MASK
#define LEDSTRETCH B12_MASK }PHY_ACCESS_BITS;
#define LED1000 B8_MASK
#define LED100 B7_MASK
#define LEDMP B6_MASK
#define LEDFD B5_MASK
#define LEDLINK B4_MASK
#define LEDRCVMAT B3_MASK
#define LEDXMT B2_MASK
#define LEDRCV B1_MASK
#define LEDCOLOUT B0_MASK
/* EEPROM_ACC 0x17C, 16bit register */
#define PVALID B15_MASK
#define PREAD B14_MASK
#define EEDET B13_MASK
#define EEN B4_MASK
#define ECS B2_MASK
#define EESK B1_MASK
#define edi_edo b0_MASK
/* PMAT0 0x190, 32bit register */ /* PMAT0 0x190, 32bit register */
#define PMR_ACTIVE B31_MASK typedef enum {
#define PMR_WR_CMD B30_MASK PMR_ACTIVE = (1 << 31),
#define PMR_RD_CMD B29_MASK PMR_WR_CMD = (1 << 30),
#define PMR_BANK B28_MASK PMR_RD_CMD = (1 << 29),
#define PMR_ADDR 0x007F0000 /* 22:16 */ PMR_BANK = (1 <<28),
#define PMR_B4 0x000000FF /* 15:0 */ PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)|
(1 << 22),/* 22:16 */
PMR_B4 = (0xF << 0) | (0xF << 4),/* 15:0 */
}PMAT0_BITS;
/* PMAT1 0x194, 32bit register */ /* PMAT1 0x194, 32bit register */
#define PMR_B3 0xFF000000 /* 31:24 */ typedef enum {
#define PMR_B2 0x00FF0000 /* 23:16 */ PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */
#define PMR_B1 0x0000FF00 /* 15:8 */ PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */
#define PMR_B0 0x000000FF /* 7:0 */ PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */
PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */
}PMAT1_BITS;
/************************************************************************/ /************************************************************************/
/* */ /* */
...@@ -615,7 +566,7 @@ eg., if the value 10011010b is written into the least significant byte of a comm ...@@ -615,7 +566,7 @@ eg., if the value 10011010b is written into the least significant byte of a comm
#define PCI_VENDOR_ID_AMD 0x1022 #define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD8111E_7462 0x7462 #define PCI_DEVICE_ID_AMD8111E_7462 0x7462
#define MAX_UNITS 16 /* Maximum number of devices possible */ #define MAX_UNITS 8 /* Maximum number of devices possible */
#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */ #define NUM_TX_BUFFERS 32 /* Number of transmit buffers */
#define NUM_RX_BUFFERS 32 /* Number of receive buffers */ #define NUM_RX_BUFFERS 32 /* Number of receive buffers */
...@@ -637,45 +588,73 @@ eg., if the value 10011010b is written into the least significant byte of a comm ...@@ -637,45 +588,73 @@ eg., if the value 10011010b is written into the least significant byte of a comm
#define MIN_PKT_LEN 60 #define MIN_PKT_LEN 60
#define ETH_ADDR_LEN 6 #define ETH_ADDR_LEN 6
#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion.
Only 500 usec resolution */
#define OPTION_VLAN_ENABLE 0x0001 #define OPTION_VLAN_ENABLE 0x0001
#define OPTION_JUMBO_ENABLE 0x0002 #define OPTION_JUMBO_ENABLE 0x0002
#define OPTION_MULTICAST_ENABLE 0x0004 #define OPTION_MULTICAST_ENABLE 0x0004
#define OPTION_WOL_ENABLE 0x0008 #define OPTION_WOL_ENABLE 0x0008
#define OPTION_WAKE_MAGIC_ENABLE 0x0010 #define OPTION_WAKE_MAGIC_ENABLE 0x0010
#define OPTION_WAKE_PHY_ENABLE 0x0020 #define OPTION_WAKE_PHY_ENABLE 0x0020
#define OPTION_INTR_COAL_ENABLE 0x0040
#define OPTION_DYN_IPG_ENABLE 0x0080
#define PHY_REG_ADDR_MASK 0x1f #define PHY_REG_ADDR_MASK 0x1f
/* ipg parameters */
#define DEFAULT_IPG 0x60
#define IFS1_DELTA 36
#define IPG_CONVERGE_TIME 0.5
#define IPG_STABLE_TIME 5
#define MIN_IPG 96
#define MAX_IPG 255
#define IPG_STEP 16
#define CSTATE 1
#define SSTATE 2
/* Assume controller gets data 10 times the maximum processing time */ /* Assume controller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10; #define REPEAT_CNT 10;
/* amd8111e descriptor flag definitions */ typedef enum {
typedef enum {
OWN_BIT = (1 << 15),
ADD_FCS_BIT = (1 << 13),
LTINT_BIT = (1 << 12),
STP_BIT = (1 << 9),
ENP_BIT = (1 << 8),
KILL_BIT = (1 << 6),
TCC_VLAN_INSERT = (1 << 1),
TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0),
}TX_FLAG_BITS;
typedef enum {
ERR_BIT = (1 << 14),
FRAM_BIT = (1 << 13),
OFLO_BIT = (1 << 12),
CRC_BIT = (1 << 11),
PAM_BIT = (1 << 6),
LAFM_BIT = (1 << 5),
BAM_BIT = (1 << 4),
TT_VLAN_TAGGED = (1 << 3) |(1 << 2),/* 0x000c */
TT_PRTY_TAGGED = (1 << 3),/* 0x0008 */
}RX_FLAG_BITS;
#define OWN_BIT B15_MASK
#define ADD_FCS_BIT B13_MASK
#define LTINT_BIT B12_MASK
#define STP_BIT B9_MASK
#define ENP_BIT B8_MASK
#define KILL_BIT B6_MASK
#define TCC_MASK 0x0003
#define TCC_VLAN_INSERT B1_MASK
#define TCC_VLAN_REPLACE 0x0003
#define RESET_RX_FLAGS 0x0000 #define RESET_RX_FLAGS 0x0000
#define ERR_BIT B14_MASK
#define FRAM_BIT B13_MASK
#define OFLO_BIT B12_MASK
#define CRC_BIT B11_MASK
#define PAM_BIT B6_MASK
#define LAFM_BIT B5_MASK
#define BAM_BIT B4_MASK
#define TT_MASK 0x000c #define TT_MASK 0x000c
#define TT_VLAN_TAGGED 0x000c #define TCC_MASK 0x0003
#define TT_PRTY_TAGGED 0x0008
/* driver ioctl parameters */ /* driver ioctl parameters */
#define PHY_ID 0x01 /* currently it is fixed */ #define PHY_ID 0x01 /* currently it is fixed */
#define AMD8111E_REG_DUMP_LEN 4096 /* Memory mapped register length */ #define AMD8111E_REG_DUMP_LEN 13*sizeof(u32)
/* crc generator constants */
#define CRC32 0xedb88320
#define INITCRC 0xFFFFFFFF
/* amd8111e descriptor format */ /* amd8111e descriptor format */
...@@ -683,7 +662,7 @@ struct amd8111e_tx_dr{ ...@@ -683,7 +662,7 @@ struct amd8111e_tx_dr{
u16 buff_count; /* Size of the buffer pointed by this descriptor */ u16 buff_count; /* Size of the buffer pointed by this descriptor */
u16 tx_dr_offset2; u16 tx_flags;
u16 tag_ctrl_info; u16 tag_ctrl_info;
...@@ -704,7 +683,7 @@ struct amd8111e_rx_dr{ ...@@ -704,7 +683,7 @@ struct amd8111e_rx_dr{
u16 buff_count; /* Len of the buffer pointed by descriptor. */ u16 buff_count; /* Len of the buffer pointed by descriptor. */
u16 rx_dr_offset10; u16 rx_flags;
u32 buff_phy_addr; u32 buff_phy_addr;
...@@ -719,10 +698,58 @@ struct amd8111e_link_config{ ...@@ -719,10 +698,58 @@ struct amd8111e_link_config{
u16 speed; u16 speed;
u8 duplex; u8 duplex;
u8 autoneg; u8 autoneg;
u16 orig_speed;
u8 orig_duplex;
u8 reserved; /* 32bit alignment */ u8 reserved; /* 32bit alignment */
}; };
enum coal_type{
NO_COALESCE,
LOW_COALESCE,
MEDIUM_COALESCE,
HIGH_COALESCE,
};
enum coal_mode{
RX_INTR_COAL,
TX_INTR_COAL,
DISABLE_COAL,
ENABLE_COAL,
};
#define MAX_TIMEOUT 40
#define MAX_EVENT_COUNT 31
struct amd8111e_coalesce_conf{
unsigned int rx_timeout;
unsigned int rx_event_count;
unsigned long rx_packets;
unsigned long rx_prev_packets;
unsigned long rx_bytes;
unsigned long rx_prev_bytes;
unsigned int rx_coal_type;
unsigned int tx_timeout;
unsigned int tx_event_count;
unsigned long tx_packets;
unsigned long tx_prev_packets;
unsigned long tx_bytes;
unsigned long tx_prev_bytes;
unsigned int tx_coal_type;
};
struct ipg_info{
unsigned int ipg_state;
unsigned int ipg;
unsigned int current_ipg;
unsigned int col_cnt;
unsigned int diff_col_cnt;
unsigned int timer_tick;
unsigned int prev_ipg;
struct timer_list ipg_timer;
};
struct amd8111e_priv{ struct amd8111e_priv{
struct amd8111e_tx_dr* tx_ring; struct amd8111e_tx_dr* tx_ring;
...@@ -742,45 +769,54 @@ struct amd8111e_priv{ ...@@ -742,45 +769,54 @@ struct amd8111e_priv{
void * mmio; void * mmio;
spinlock_t lock; /* Guard lock */ spinlock_t lock; /* Guard lock */
unsigned long rx_idx, tx_idx; /* The next free ring entry */ unsigned long rx_idx, tx_idx; /* The next free ring entry */
unsigned long tx_complete_idx; unsigned long tx_complete_idx;
unsigned long tx_ring_complete_idx; unsigned long tx_ring_complete_idx;
unsigned long tx_ring_idx; unsigned long tx_ring_idx;
int rx_buff_len; /* Buffer length of rx buffers */ unsigned int rx_buff_len; /* Buffer length of rx buffers */
int options; /* Options enabled/disabled for the device */ int options; /* Options enabled/disabled for the device */
unsigned long ext_phy_option; unsigned long ext_phy_option;
struct amd8111e_link_config link_config; struct amd8111e_link_config link_config;
int pm_cap; int pm_cap;
u32 pm_state[12];
struct net_device *next; struct net_device *next;
int mii;
struct mii_if_info mii_if;
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
struct vlan_group *vlgrp; struct vlan_group *vlgrp;
#endif #endif
char opened; char opened;
struct net_device_stats stats; struct net_device_stats stats;
struct net_device_stats prev_stats;
struct dev_mc_list* mc_list; struct dev_mc_list* mc_list;
struct amd8111e_coalesce_conf coal_conf;
struct ipg_info ipg_data;
}; };
#define AMD8111E_READ_REG64(_memMapBase, _offset, _pUlData) \
*(u32*)(_pUlData) = readl(_memMapBase + (_offset)); \
*((u32*)(_pUlData))+1) = readl(_memMapBase + ((_offset)+4))
#define AMD8111E_WRITE_REG64(_memMapBase, _offset, _pUlData) \ /* kernel provided writeq does not write 64 bits into the amd8111e device register instead writes only higher 32bits data into lower 32bits of the register.
writel(*(u32*)(_pUlData), _memMapBase + (_offset)); \ BUG? */
writel(*(u32*)((u8*)(_pUlData)+4), _memMapBase + ((_offset)+4)) \ #define amd8111e_writeq(_UlData,_memMap) \
writel(*(u32*)(&_UlData), _memMap); \
writel(*(u32*)((u8*)(&_UlData)+4), _memMap+4)
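As a hedged usage sketch (hypothetical values, assuming the LADRF offset defined in this header and a mapped `lp->mmio` base), the macro simply expands into two 32-bit writes covering both halves of a 64-bit register:

	u64 mc_filter = 0;				/* 64-bit logical address filter value */
	amd8111e_writeq(mc_filter, lp->mmio + LADRF);	/* writel() at LADRF, then at LADRF + 4 */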
/* maps the external speed options to internal value */ /* maps the external speed options to internal value */
static unsigned char speed_duplex_mapping[] = { typedef enum {
SPEED_AUTONEG,
SPEED10_HALF,
SPEED10_FULL,
SPEED100_HALF,
SPEED100_FULL,
}EXT_PHY_OPTION;
XPHYANE, /* Auto-negotiation, speed_duplex option 0 */
0, /* 10M Half, speed_duplex option 1 */
XPHYFD, /* 10M Full, speed_duplex option 2 */
XPHYSP, /* 100M Half, speed_duplex option 3 */
XPHYFD | XPHYSP /* 100M Full, speed_duplex option 4 */
};
static int card_idx; static int card_idx;
static int speed_duplex[MAX_UNITS] = { 0, }; static int speed_duplex[MAX_UNITS] = { 0, };
static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
static unsigned int chip_version;
#endif /* _AMD8111E_H */ #endif /* _AMD8111E_H */
...@@ -614,6 +614,22 @@ e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent) ...@@ -614,6 +614,22 @@ e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
goto err_dealloc; goto err_dealloc;
} }
dev->vlan_rx_register = e100_vlan_rx_register;
dev->vlan_rx_add_vid = e100_vlan_rx_add_vid;
dev->vlan_rx_kill_vid = e100_vlan_rx_kill_vid;
dev->irq = pcid->irq;
dev->open = &e100_open;
dev->hard_start_xmit = &e100_xmit_frame;
dev->stop = &e100_close;
dev->change_mtu = &e100_change_mtu;
dev->get_stats = &e100_get_stats;
dev->set_multicast_list = &e100_set_multi;
dev->set_mac_address = &e100_set_mac;
dev->do_ioctl = &e100_ioctl;
if (bdp->flags & USE_IPCB)
dev->features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if ((rc = register_netdev(dev)) != 0) { if ((rc = register_netdev(dev)) != 0) {
goto err_pci; goto err_pci;
} }
...@@ -660,23 +676,6 @@ e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent) ...@@ -660,23 +676,6 @@ e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
goto err_unregister_netdev; goto err_unregister_netdev;
} }
dev->vlan_rx_register = e100_vlan_rx_register;
dev->vlan_rx_add_vid = e100_vlan_rx_add_vid;
dev->vlan_rx_kill_vid = e100_vlan_rx_kill_vid;
dev->irq = pcid->irq;
dev->open = &e100_open;
dev->hard_start_xmit = &e100_xmit_frame;
dev->stop = &e100_close;
dev->change_mtu = &e100_change_mtu;
dev->get_stats = &e100_get_stats;
dev->set_multicast_list = &e100_set_multi;
dev->set_mac_address = &e100_set_mac;
dev->do_ioctl = &e100_ioctl;
if (bdp->flags & USE_IPCB)
dev->features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
e100nics++; e100nics++;
e100_get_speed_duplex_caps(bdp); e100_get_speed_duplex_caps(bdp);
......
...@@ -602,7 +602,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, ...@@ -602,7 +602,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
*ioaddr_out = NULL; *ioaddr_out = NULL;
*dev_out = NULL; *dev_out = NULL;
/* dev zeroed in init_etherdev */ /* dev zeroed in alloc_etherdev */
dev = alloc_etherdev (sizeof (*tp)); dev = alloc_etherdev (sizeof (*tp));
if (dev == NULL) { if (dev == NULL) {
printk (KERN_ERR PFX "unable to alloc new ethernet\n"); printk (KERN_ERR PFX "unable to alloc new ethernet\n");
...@@ -790,7 +790,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, ...@@ -790,7 +790,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
dev->irq = pdev->irq; dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr; dev->base_addr = (unsigned long) ioaddr;
/* dev->priv/tp zeroed and aligned in init_etherdev */ /* dev->priv/tp zeroed and aligned in alloc_etherdev */
tp = dev->priv; tp = dev->priv;
/* note: tp->chipset set in netdrv_init_board */ /* note: tp->chipset set in netdrv_init_board */
......
...@@ -1002,7 +1002,9 @@ pcnet32_init_ring(struct net_device *dev) ...@@ -1002,7 +1002,9 @@ pcnet32_init_ring(struct net_device *dev)
} }
skb_reserve (rx_skbuff, 2); skb_reserve (rx_skbuff, 2);
} }
lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail, rx_skbuff->len, PCI_DMA_FROMDEVICE);
if (lp->rx_dma_addr[i] == NULL)
lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail, rx_skbuff->len, PCI_DMA_FROMDEVICE);
lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ); lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
lp->rx_ring[i].status = le16_to_cpu(0x8000); lp->rx_ring[i].status = le16_to_cpu(0x8000);
...@@ -1037,7 +1039,7 @@ pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) ...@@ -1037,7 +1039,7 @@ pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
/* ReInit Ring */ /* ReInit Ring */
lp->a.write_csr (ioaddr, 0, 1); lp->a.write_csr (ioaddr, 0, 1);
i = 0; i = 0;
while (i++ < 100) while (i++ < 1000)
if (lp->a.read_csr (ioaddr, 0) & 0x0100) if (lp->a.read_csr (ioaddr, 0) & 0x0100)
break; break;
...@@ -1128,6 +1130,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1128,6 +1130,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->tx_skbuff[entry] = skb; lp->tx_skbuff[entry] = skb;
lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[entry].status = le16_to_cpu(status); lp->tx_ring[entry].status = le16_to_cpu(status);
lp->cur_tx++; lp->cur_tx++;
......
...@@ -365,8 +365,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, ...@@ -365,8 +365,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
*ioaddr_out = NULL; *ioaddr_out = NULL;
*dev_out = NULL; *dev_out = NULL;
// dev zeroed in init_etherdev // dev zeroed in alloc_etherdev
dev = init_etherdev(NULL, sizeof (*tp)); dev = alloc_etherdev(sizeof (*tp));
if (dev == NULL) { if (dev == NULL) {
printk(KERN_ERR PFX "unable to alloc new ethernet\n"); printk(KERN_ERR PFX "unable to alloc new ethernet\n");
return -ENOMEM; return -ENOMEM;
...@@ -391,18 +391,18 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, ...@@ -391,18 +391,18 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
printk(KERN_ERR PFX printk(KERN_ERR PFX
"region #1 not an MMIO resource, aborting\n"); "region #1 not an MMIO resource, aborting\n");
rc = -ENODEV; rc = -ENODEV;
goto err_out; goto err_out_disable;
} }
// check for weird/broken PCI region reporting // check for weird/broken PCI region reporting
if (mmio_len < RTL_MIN_IO_SIZE) { if (mmio_len < RTL_MIN_IO_SIZE) {
printk(KERN_ERR PFX "Invalid PCI region size(s), aborting\n"); printk(KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
rc = -ENODEV; rc = -ENODEV;
goto err_out; goto err_out_disable;
} }
rc = pci_request_regions(pdev, dev->name); rc = pci_request_regions(pdev, dev->name);
if (rc) if (rc)
goto err_out; goto err_out_disable;
// enable PCI bus-mastering // enable PCI bus-mastering
pci_set_master(pdev); pci_set_master(pdev);
...@@ -450,8 +450,10 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, ...@@ -450,8 +450,10 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
err_out_free_res: err_out_free_res:
pci_release_regions(pdev); pci_release_regions(pdev);
err_out_disable:
pci_disable_device(pdev);
err_out: err_out:
unregister_netdev(dev);
kfree(dev); kfree(dev);
return rc; return rc;
} }
...@@ -464,7 +466,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -464,7 +466,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void *ioaddr = NULL; void *ioaddr = NULL;
static int board_idx = -1; static int board_idx = -1;
static int printed_version = 0; static int printed_version = 0;
int i; int i, rc;
int option = -1, Cap10_100 = 0, Cap1000 = 0; int option = -1, Cap10_100 = 0, Cap1000 = 0;
assert(pdev != NULL); assert(pdev != NULL);
...@@ -477,20 +479,18 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -477,20 +479,18 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
printed_version = 1; printed_version = 1;
} }
i = rtl8169_init_board(pdev, &dev, &ioaddr); rc = rtl8169_init_board(pdev, &dev, &ioaddr);
if (i < 0) { if (rc)
return i; return rc;
}
tp = dev->priv; tp = dev->priv;
assert(ioaddr != NULL); assert(ioaddr != NULL);
assert(dev != NULL); assert(dev != NULL);
assert(tp != NULL); assert(tp != NULL);
// Get MAC address // // Get MAC address. FIXME: read EEPROM
for (i = 0; i < MAC_ADDR_LEN; i++) { for (i = 0; i < MAC_ADDR_LEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i); dev->dev_addr[i] = RTL_R8(MAC0 + i);
}
dev->open = rtl8169_open; dev->open = rtl8169_open;
dev->hard_start_xmit = rtl8169_start_xmit; dev->hard_start_xmit = rtl8169_start_xmit;
...@@ -507,11 +507,20 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -507,11 +507,20 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->pci_dev = pdev; tp->pci_dev = pdev;
tp->mmio_addr = ioaddr; tp->mmio_addr = ioaddr;
spin_lock_init(&tp->lock);
rc = register_netdev(dev);
if (rc) {
iounmap(ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(dev);
return rc;
}
printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n", dev->name, printk(KERN_DEBUG "%s: Identified chip type is '%s'.\n", dev->name,
rtl_chip_info[tp->chipset].name); rtl_chip_info[tp->chipset].name);
spin_lock_init(&tp->lock);
pci_set_drvdata(pdev, dev); pci_set_drvdata(pdev, dev);
printk(KERN_INFO "%s: %s at 0x%lx, " printk(KERN_INFO "%s: %s at 0x%lx, "
...@@ -623,7 +632,7 @@ static void __devexit ...@@ -623,7 +632,7 @@ static void __devexit
rtl8169_remove_one(struct pci_dev *pdev) rtl8169_remove_one(struct pci_dev *pdev)
{ {
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = (struct rtl8169_private *) (dev->priv); struct rtl8169_private *tp = dev->priv;
assert(dev != NULL); assert(dev != NULL);
assert(tp != NULL); assert(tp != NULL);
...@@ -636,6 +645,7 @@ rtl8169_remove_one(struct pci_dev *pdev) ...@@ -636,6 +645,7 @@ rtl8169_remove_one(struct pci_dev *pdev)
memset(dev, 0xBC, memset(dev, 0xBC,
sizeof (struct net_device) + sizeof (struct rtl8169_private)); sizeof (struct net_device) + sizeof (struct rtl8169_private));
pci_disable_device(pdev);
kfree(dev); kfree(dev);
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
} }
......
...@@ -346,6 +346,27 @@ static void TLan_EeReceiveByte( u16, u8 *, int ); ...@@ -346,6 +346,27 @@ static void TLan_EeReceiveByte( u16, u8 *, int );
static int TLan_EeReadByte( struct net_device *, u8, u8 * ); static int TLan_EeReadByte( struct net_device *, u8, u8 * );
static void
TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
{
unsigned long addr = (unsigned long)skb;
tag->buffer[9].address = (u32)addr;
addr >>= 31; /* >>= 32 is undefined for 32bit arch, stupid C */
addr >>= 1;
tag->buffer[8].address = (u32)addr;
}
static struct sk_buff *
TLan_GetSKB( struct tlan_list_tag *tag)
{
unsigned long addr = tag->buffer[8].address;
addr <<= 31;
addr <<= 1;
addr |= tag->buffer[9].address;
return (struct sk_buff *) addr;
}
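A quick round-trip sketch (hypothetical, not part of the driver): the pair of helpers splits the skb pointer across the two 32-bit list fields so it survives on 64-bit hosts:

	TLan_StoreSKB(tail_list, skb);			/* low 32 bits in buffer[9], upper bits in buffer[8] */
	BUG_ON(TLan_GetSKB(tail_list) != skb);		/* rebuilt as (buffer[8] << 32) | buffer[9] */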
static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
TLan_HandleInvalid, TLan_HandleInvalid,
TLan_HandleTxEOF, TLan_HandleTxEOF,
...@@ -424,7 +445,7 @@ static void __devexit tlan_remove_one( struct pci_dev *pdev) ...@@ -424,7 +445,7 @@ static void __devexit tlan_remove_one( struct pci_dev *pdev)
pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA );
} }
release_region( dev->base_addr, 0x10 ); pci_release_regions(pdev);
kfree( dev ); kfree( dev );
...@@ -510,15 +531,25 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, ...@@ -510,15 +531,25 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
TLanPrivateInfo *priv; TLanPrivateInfo *priv;
u8 pci_rev; u8 pci_rev;
u16 device_id; u16 device_id;
int reg; int reg, rc = -ENODEV;
if (pdev && pci_enable_device(pdev)) if (pdev) {
return -EIO; rc = pci_enable_device(pdev);
if (rc)
return rc;
rc = pci_request_regions(pdev, TLanSignature);
if (rc) {
printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
goto err_out;
}
}
dev = init_etherdev(NULL, sizeof(TLanPrivateInfo)); dev = alloc_etherdev(sizeof(TLanPrivateInfo));
if (dev == NULL) { if (dev == NULL) {
printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
return -ENOMEM; rc = -ENOMEM;
goto err_out_regions;
} }
SET_MODULE_OWNER(dev); SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev); SET_NETDEV_DEV(dev, &pdev->dev);
...@@ -533,12 +564,10 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, ...@@ -533,12 +564,10 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv->adapter = &board_info[ent->driver_data]; priv->adapter = &board_info[ent->driver_data];
if(pci_set_dma_mask(pdev, 0xFFFFFFFF)) rc = pci_set_dma_mask(pdev, 0xFFFFFFFF);
{ if (rc) {
printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
unregister_netdev(dev); goto err_out_free_dev;
kfree(dev);
return -ENODEV;
} }
pci_read_config_byte ( pdev, PCI_REVISION_ID, &pci_rev); pci_read_config_byte ( pdev, PCI_REVISION_ID, &pci_rev);
...@@ -553,9 +582,8 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, ...@@ -553,9 +582,8 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
} }
if (!pci_io_base) { if (!pci_io_base) {
printk(KERN_ERR "TLAN: No IO mappings available\n"); printk(KERN_ERR "TLAN: No IO mappings available\n");
unregister_netdev(dev); rc = -EIO;
kfree(dev); goto err_out_free_dev;
return -ENODEV;
} }
dev->base_addr = pci_io_base; dev->base_addr = pci_io_base;
...@@ -605,12 +633,18 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, ...@@ -605,12 +633,18 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
spin_lock_init(&priv->lock); spin_lock_init(&priv->lock);
if (TLan_Init(dev)) { rc = TLan_Init(dev);
if (rc) {
printk(KERN_ERR "TLAN: Could not set up device.\n");
goto err_out_free_dev;
}
rc = register_netdev(dev);
if (rc) {
printk(KERN_ERR "TLAN: Could not register device.\n"); printk(KERN_ERR "TLAN: Could not register device.\n");
unregister_netdev(dev); goto err_out_uninit;
kfree(dev); }
return -EAGAIN;
} else {
TLanDevicesInstalled++; TLanDevicesInstalled++;
boards_found++; boards_found++;
...@@ -631,8 +665,19 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, ...@@ -631,8 +665,19 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv->adapter->deviceLabel, priv->adapter->deviceLabel,
priv->adapterRev); priv->adapterRev);
return 0; return 0;
}
err_out_uninit:
pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
priv->dmaStorageDMA );
err_out_free_dev:
kfree(dev);
err_out_regions:
if (pdev)
pci_release_regions(pdev);
err_out:
if (pdev)
pci_disable_device(pdev);
return rc;
} }
...@@ -798,15 +843,6 @@ static int TLan_Init( struct net_device *dev ) ...@@ -798,15 +843,6 @@ static int TLan_Init( struct net_device *dev )
priv = dev->priv; priv = dev->priv;
if (!priv->is_eisa) /* EISA devices have already requested IO */
if (!request_region( dev->base_addr, 0x10, TLanSignature )) {
printk(KERN_ERR "TLAN: %s: IO port region 0x%lx size 0x%x in use.\n",
dev->name,
dev->base_addr,
0x10 );
return -EIO;
}
if ( bbuf ) { if ( bbuf ) {
dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
* ( sizeof(TLanList) + TLAN_MAX_FRAME_SIZE ); * ( sizeof(TLanList) + TLAN_MAX_FRAME_SIZE );
...@@ -820,7 +856,6 @@ static int TLan_Init( struct net_device *dev ) ...@@ -820,7 +856,6 @@ static int TLan_Init( struct net_device *dev )
if ( priv->dmaStorage == NULL ) { if ( priv->dmaStorage == NULL ) {
printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n", printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n",
dev->name ); dev->name );
release_region( dev->base_addr, 0x10 );
return -ENOMEM; return -ENOMEM;
} }
memset( priv->dmaStorage, 0, dma_size ); memset( priv->dmaStorage, 0, dma_size );
...@@ -1039,7 +1074,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) ...@@ -1039,7 +1074,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
memcpy( tail_buffer, skb->data, skb->len ); memcpy( tail_buffer, skb->data, skb->len );
} else { } else {
tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE);
tail_list->buffer[9].address = (u32) skb; TLan_StoreSKB(tail_list, skb);
} }
pad = TLAN_MIN_FRAME_SIZE - skb->len; pad = TLAN_MIN_FRAME_SIZE - skb->len;
...@@ -1365,9 +1400,10 @@ u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) ...@@ -1365,9 +1400,10 @@ u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
ack++; ack++;
if ( ! bbuf ) { if ( ! bbuf ) {
struct sk_buff *skb = (struct sk_buff *) head_list->buffer[9].address; struct sk_buff *skb = TLan_GetSKB(head_list);
pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0; head_list->buffer[9].address = 0;
} }
...@@ -1523,7 +1559,7 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) ...@@ -1523,7 +1559,7 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 ); new_skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
if ( new_skb != NULL ) { if ( new_skb != NULL ) {
skb = (struct sk_buff *) head_list->buffer[9].address; skb = TLan_GetSKB(head_list);
pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); pci_unmap_single(priv->pciDev, head_list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
skb_trim( skb, frameSize ); skb_trim( skb, frameSize );
...@@ -1537,10 +1573,7 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) ...@@ -1537,10 +1573,7 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE ); t = (void *) skb_put( new_skb, TLAN_MAX_FRAME_SIZE );
head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
head_list->buffer[8].address = (u32) t; head_list->buffer[8].address = (u32) t;
#if BITS_PER_LONG==64 TLan_StoreSKB(head_list, new_skb);
#error "Not 64bit clean"
#endif
head_list->buffer[9].address = (u32) new_skb;
} else } else
printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" ); printk(KERN_WARNING "TLAN: Couldn't allocate memory for received data.\n" );
} }
...@@ -1926,6 +1959,7 @@ void TLan_ResetLists( struct net_device *dev ) ...@@ -1926,6 +1959,7 @@ void TLan_ResetLists( struct net_device *dev )
} }
list->buffer[2].count = 0; list->buffer[2].count = 0;
list->buffer[2].address = 0; list->buffer[2].address = 0;
list->buffer[8].address = 0;
list->buffer[9].address = 0; list->buffer[9].address = 0;
} }
...@@ -1951,7 +1985,7 @@ void TLan_ResetLists( struct net_device *dev ) ...@@ -1951,7 +1985,7 @@ void TLan_ResetLists( struct net_device *dev )
} }
list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
list->buffer[8].address = (u32) t; list->buffer[8].address = (u32) t;
list->buffer[9].address = (u32) skb; TLan_StoreSKB(list, skb);
} }
list->buffer[1].count = 0; list->buffer[1].count = 0;
list->buffer[1].address = 0; list->buffer[1].address = 0;
...@@ -1974,20 +2008,22 @@ void TLan_FreeLists( struct net_device *dev ) ...@@ -1974,20 +2008,22 @@ void TLan_FreeLists( struct net_device *dev )
if ( ! bbuf ) { if ( ! bbuf ) {
for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
list = priv->txList + i; list = priv->txList + i;
skb = (struct sk_buff *) list->buffer[9].address; skb = TLan_GetSKB(list);
if ( skb ) { if ( skb ) {
pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_any( skb ); dev_kfree_skb_any( skb );
list->buffer[8].address = 0;
list->buffer[9].address = 0; list->buffer[9].address = 0;
} }
} }
for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
list = priv->rxList + i; list = priv->rxList + i;
skb = (struct sk_buff *) list->buffer[9].address; skb = TLan_GetSKB(list);
if ( skb ) { if ( skb ) {
pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any( skb ); dev_kfree_skb_any( skb );
list->buffer[8].address = 0;
list->buffer[9].address = 0; list->buffer[9].address = 0;
} }
} }
......