Commit 306ebc09 authored by Jeff Garzik

Merge pobox.com:/garz/repo/netdev-2.6/e1000

into pobox.com:/garz/repo/net-drivers-2.6
parents 8672a651 f4f57a52
@@ -138,6 +138,7 @@ struct e1000_adapter;
 #define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
 #define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
 #define E1000_EEPROM_APME 0x0400
 #ifndef E1000_MASTER_SLAVE
@@ -209,6 +210,7 @@ struct e1000_adapter {
     /* TX */
     struct e1000_desc_ring tx_ring;
+    struct e1000_buffer previous_buffer_info;
     spinlock_t tx_lock;
     uint32_t txd_cmd;
     uint32_t tx_int_delay;
@@ -222,6 +224,7 @@ struct e1000_adapter {
     uint32_t tx_fifo_size;
     atomic_t tx_fifo_stall;
     boolean_t pcix_82544;
+    boolean_t detect_tx_hung;
     /* RX */
     struct e1000_desc_ring rx_ring;
...
@@ -1310,7 +1310,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
     struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
     struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
     struct pci_dev *pdev = adapter->pdev;
-    int i;
+    int i, ret_val;
     E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
@@ -1330,11 +1330,12 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
             rxdr->buffer_info[i].length,
             PCI_DMA_FROMDEVICE);
-        if (!e1000_check_lbtest_frame(rxdr->buffer_info[i++].skb, 1024))
-            return 0;
-    } while (i < 64);
-    return 13;
+        ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
+                                           1024);
+        i++;
+    } while (ret_val != 0 && i < 64);
+    return ret_val;
 }
 static int
...
@@ -1572,7 +1572,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
         if(mii_status_reg & MII_SR_LINK_STATUS) break;
         msec_delay(100);
     }
-    if((i == 0) && (hw->phy_type == e1000_phy_m88)) {
+    if((i == 0) &&
+       (hw->phy_type == e1000_phy_m88)) {
         /* We didn't get link. Reset the DSP and wait again for link. */
         ret_val = e1000_phy_reset_dsp(hw);
         if(ret_val) {
@@ -2503,7 +2504,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
         }
     }
-    ret_val = e1000_read_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT & reg_addr,
+    ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
                                     phy_data);
     return ret_val;
@@ -2609,7 +2610,7 @@ e1000_write_phy_reg(struct e1000_hw *hw,
         }
     }
-    ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT & reg_addr,
+    ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
                                      phy_data);
     return ret_val;
@@ -2956,7 +2957,6 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
     ret_val = e1000_check_polarity(hw, &polarity);
     if(ret_val)
         return ret_val;
     phy_info->cable_polarity = polarity;
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
@@ -2966,9 +2966,9 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
     phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >>
                           M88E1000_PSSR_MDIX_SHIFT;
-    if(phy_data & M88E1000_PSSR_1000MBS) {
-        /* Cable Length Estimation and Local/Remote Receiver Informatoion
-         * are only valid at 1000 Mbps
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+        /* Cable Length Estimation and Local/Remote Receiver Information
+         * are only valid at 1000 Mbps.
          */
         phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                                   M88E1000_PSSR_CABLE_LENGTH_SHIFT);
@@ -4639,13 +4639,14 @@ e1000_get_bus_info(struct e1000_hw *hw)
 {
     uint32_t status;
-    if(hw->mac_type < e1000_82543) {
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
         hw->bus_type = e1000_bus_type_unknown;
         hw->bus_speed = e1000_bus_speed_unknown;
         hw->bus_width = e1000_bus_width_unknown;
-        return;
-    }
+        break;
+    default:
         status = E1000_READ_REG(hw, STATUS);
         hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
                        e1000_bus_type_pcix : e1000_bus_type_pci;
@@ -4674,6 +4675,8 @@ e1000_get_bus_info(struct e1000_hw *hw)
         }
         hw->bus_width = (status & E1000_STATUS_BUS64) ?
                         e1000_bus_width_64 : e1000_bus_width_32;
+        break;
+    }
 }
 /******************************************************************************
 * Reads a value from one of the devices registers using port I/O (as opposed
@@ -4738,6 +4741,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
     uint16_t agc_value = 0;
     uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
     uint16_t i, phy_data;
+    uint16_t cable_length;
     DEBUGFUNC("e1000_get_cable_length");
@@ -4749,10 +4753,11 @@ e1000_get_cable_length(struct e1000_hw *hw,
                            &phy_data);
     if(ret_val)
         return ret_val;
+    cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                   M88E1000_PSSR_CABLE_LENGTH_SHIFT;
     /* Convert the enum value to ranged values */
-    switch((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
-           M88E1000_PSSR_CABLE_LENGTH_SHIFT) {
+    switch (cable_length) {
     case e1000_cable_length_50:
         *min_length = 0;
         *max_length = e1000_igp_cable_length_50;
@@ -4919,8 +4924,7 @@ e1000_check_downshift(struct e1000_hw *hw)
             return ret_val;
         hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
-    }
-    else if(hw->phy_type == e1000_phy_m88) {
+    } else if(hw->phy_type == e1000_phy_m88) {
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                      &phy_data);
         if(ret_val)
...
@@ -369,6 +369,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
 #define E1000_DEV_ID_82546GB_SERDES 0x107B
 #define E1000_DEV_ID_82546GB_PCIE 0x108A
 #define E1000_DEV_ID_82547EI 0x1019
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -1734,6 +1735,9 @@ struct e1000_hw {
 #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
 #define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
 #define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
@@ -1794,8 +1798,7 @@ struct e1000_hw {
 #define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
-#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
-#define MAX_PHY_MULTI_PAGE_REG 0xF /*Registers that are equal on all pages*/
 /* PHY Control Register */
 #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
 #define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
@@ -2098,7 +2101,11 @@ struct e1000_hw {
 #define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
 #define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
 /* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
 #define M88E1000_E_PHY_ID 0x01410C50
 #define M88E1000_I_PHY_ID 0x01410C30
 #define M88E1011_I_PHY_ID 0x01410C20
...
@@ -35,6 +35,14 @@
  * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
  * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
  *
+ * 5.7.1 12/16/04
+ * - Resurrect 82547EI/GI related fix in e1000_intr to avoid deadlocks. This
+ *   fix was removed as it caused system instability. The suspected cause of
+ *   this is the called to e1000_irq_disable in e1000_intr. Inlined the
+ *   required piece of e1000_irq_disable into e1000_intr - Anton Blanchard
+ * 5.7.0 12/10/04
+ * - include fix to the condition that determines when to quit NAPI - Robert Olsson
+ * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
  * 5.6.5 11/01/04
  * - Enabling NETIF_F_SG without checksum offload is illegal -
      John Mason <jdmason@us.ibm.com>
@@ -57,7 +65,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-char e1000_driver_version[] = "5.6.10.1-k2"DRIVERNAPI;
+char e1000_driver_version[] = "5.7.6-k2"DRIVERNAPI;
 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
 /* e1000_pci_tbl - PCI Device ID Table
@@ -81,6 +89,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
     INTEL_E1000_ETHERNET_DEVICE(0x1011),
     INTEL_E1000_ETHERNET_DEVICE(0x1012),
     INTEL_E1000_ETHERNET_DEVICE(0x1013),
+    INTEL_E1000_ETHERNET_DEVICE(0x1014),
     INTEL_E1000_ETHERNET_DEVICE(0x1015),
     INTEL_E1000_ETHERNET_DEVICE(0x1016),
     INTEL_E1000_ETHERNET_DEVICE(0x1017),
@@ -308,6 +317,9 @@ e1000_up(struct e1000_adapter *adapter)
     mod_timer(&adapter->watchdog_timer, jiffies);
     e1000_irq_enable(adapter);
+#ifdef CONFIG_E1000_NAPI
+    netif_poll_enable(netdev);
+#endif
     return 0;
 }
@@ -321,6 +333,10 @@ e1000_down(struct e1000_adapter *adapter)
     del_timer_sync(&adapter->tx_fifo_stall_timer);
     del_timer_sync(&adapter->watchdog_timer);
     del_timer_sync(&adapter->phy_info_timer);
+#ifdef CONFIG_E1000_NAPI
+    netif_poll_disable(netdev);
+#endif
     adapter->link_speed = 0;
     adapter->link_duplex = 0;
     netif_carrier_off(netdev);
@@ -414,6 +430,7 @@ e1000_probe(struct pci_dev *pdev,
     int i;
     int err;
     uint16_t eeprom_data;
+    uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
     if((err = pci_enable_device(pdev)))
         return err;
@@ -510,9 +527,6 @@ e1000_probe(struct pci_dev *pdev,
     }
 #ifdef NETIF_F_TSO
-    /* Disbaled for now until root-cause is found for
-     * hangs reported against non-IA archs. TSO can be
-     * enabled using ethtool -K eth<x> tso on */
     if((adapter->hw.mac_type >= e1000_82544) &&
        (adapter->hw.mac_type != e1000_82547))
         netdev->features |= NETIF_F_TSO;
@@ -584,6 +598,11 @@ e1000_probe(struct pci_dev *pdev,
     case e1000_82542_rev2_1:
     case e1000_82543:
         break;
+    case e1000_82544:
+        e1000_read_eeprom(&adapter->hw,
+            EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+        eeprom_apme_mask = E1000_EEPROM_82544_APM;
+        break;
     case e1000_82546:
     case e1000_82546_rev_3:
         if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
@@ -598,7 +617,7 @@ e1000_probe(struct pci_dev *pdev,
             EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
         break;
     }
-    if(eeprom_data & E1000_EEPROM_APME)
+    if(eeprom_data & eeprom_apme_mask)
         adapter->wol |= E1000_WUFC_MAG;
     /* reset the hardware with the new settings */
@@ -806,6 +825,31 @@ e1000_close(struct net_device *netdev)
     return 0;
 }
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @begin: address of beginning of memory
+ * @end: address of end of memory
+ **/
+static inline boolean_t
+e1000_check_64k_bound(struct e1000_adapter *adapter,
+                      void *start, unsigned long len)
+{
+    unsigned long begin = (unsigned long) start;
+    unsigned long end = begin + len;
+    /* first rev 82545 and 82546 need to not allow any memory
+     * write location to cross a 64k boundary due to errata 23 */
+    if (adapter->hw.mac_type == e1000_82545 ||
+        adapter->hw.mac_type == e1000_82546 ) {
+        /* check buffer doesn't cross 64kB */
+        return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+    }
+    return TRUE;
+}
 /**
  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
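Aside on the boundary check added above: two byte addresses lie in the same 64 kB region exactly when they agree in bit 16 and above, so XOR-ing the first and last byte of a buffer and shifting right by 16 is zero only when the buffer does not cross a boundary. A small standalone illustration of that arithmetic follows; it is not part of the commit, and the helper name is made up for the example.

    #include <stdio.h>

    /* Returns 1 when [start, start + len) stays inside one 64 kB region. */
    static int fits_in_64k(unsigned long start, unsigned long len)
    {
        unsigned long last = start + len - 1;   /* address of the final byte */
        return ((start ^ last) >> 16) == 0;
    }

    int main(void)
    {
        printf("%d\n", fits_in_64k(0x1FF00, 0x100)); /* ends at 0x1FFFF -> 1 */
        printf("%d\n", fits_in_64k(0x1FF80, 0x100)); /* crosses 0x20000 -> 0 */
        return 0;
    }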
@@ -824,7 +868,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
     txdr->buffer_info = vmalloc(size);
     if(!txdr->buffer_info) {
         DPRINTK(PROBE, ERR,
-            "Unble to Allocate Memory for the Transmit descriptor ring\n");
+            "Unable to Allocate Memory for the Transmit descriptor ring\n");
         return -ENOMEM;
     }
     memset(txdr->buffer_info, 0, size);
@@ -836,11 +880,42 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
     txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
     if(!txdr->desc) {
+setup_tx_desc_die:
         DPRINTK(PROBE, ERR,
-            "Unble to Allocate Memory for the Transmit descriptor ring\n");
+            "Unable to Allocate Memory for the Transmit descriptor ring\n");
         vfree(txdr->buffer_info);
         return -ENOMEM;
     }
+    /* fix for errata 23, cant cross 64kB boundary */
+    if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+        void *olddesc = txdr->desc;
+        dma_addr_t olddma = txdr->dma;
+        DPRINTK(TX_ERR,ERR,"txdr align check failed: %u bytes at %p\n",
+            txdr->size, txdr->desc);
+        /* try again, without freeing the previous */
+        txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+        /* failed allocation, critial failure */
+        if(!txdr->desc) {
+            pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+            goto setup_tx_desc_die;
+        }
+        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+            /* give up */
+            pci_free_consistent(pdev, txdr->size,
+                txdr->desc, txdr->dma);
+            pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+            DPRINTK(PROBE, ERR,
+                "Unable to Allocate aligned Memory for the Transmit"
+                " descriptor ring\n");
+            vfree(txdr->buffer_info);
+            return -ENOMEM;
+        } else {
+            /* free old, move on with the new one since its okay */
+            pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+        }
+    }
     memset(txdr->desc, 0, txdr->size);
     txdr->next_to_use = 0;
@@ -945,7 +1020,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
     rxdr->buffer_info = vmalloc(size);
     if(!rxdr->buffer_info) {
         DPRINTK(PROBE, ERR,
-            "Unble to Allocate Memory for the Recieve descriptor ring\n");
+            "Unable to Allocate Memory for the Recieve descriptor ring\n");
         return -ENOMEM;
     }
     memset(rxdr->buffer_info, 0, size);
@@ -958,11 +1033,43 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
     rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
     if(!rxdr->desc) {
+setup_rx_desc_die:
         DPRINTK(PROBE, ERR,
             "Unble to Allocate Memory for the Recieve descriptor ring\n");
         vfree(rxdr->buffer_info);
         return -ENOMEM;
     }
+    /* fix for errata 23, cant cross 64kB boundary */
+    if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+        void *olddesc = rxdr->desc;
+        dma_addr_t olddma = rxdr->dma;
+        DPRINTK(RX_ERR,ERR,
+            "rxdr align check failed: %u bytes at %p\n",
+            rxdr->size, rxdr->desc);
+        /* try again, without freeing the previous */
+        rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+        /* failed allocation, critial failure */
+        if(!rxdr->desc) {
+            pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+            goto setup_rx_desc_die;
+        }
+        if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+            /* give up */
+            pci_free_consistent(pdev, rxdr->size,
+                rxdr->desc, rxdr->dma);
+            pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+            DPRINTK(PROBE, ERR,
+                "Unable to Allocate aligned Memory for the"
+                " Receive descriptor ring\n");
+            vfree(rxdr->buffer_info);
+            return -ENOMEM;
+        } else {
+            /* free old, move on with the new one since its okay */
+            pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+        }
+    }
     memset(rxdr->desc, 0, rxdr->size);
     rxdr->next_to_clean = 0;
@@ -1096,6 +1203,7 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                  struct e1000_buffer *buffer_info)
 {
     struct pci_dev *pdev = adapter->pdev;
     if(buffer_info->dma) {
         pci_unmap_page(pdev,
             buffer_info->dma,
@@ -1124,6 +1232,11 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
     /* Free all the Tx ring sk_buffs */
+    if (likely(adapter->previous_buffer_info.skb != NULL)) {
+        e1000_unmap_and_free_tx_resource(adapter,
+            &adapter->previous_buffer_info);
+    }
     for(i = 0; i < tx_ring->count; i++) {
         buffer_info = &tx_ring->buffer_info[i];
         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
@@ -1425,7 +1538,6 @@ e1000_watchdog(unsigned long data)
     struct e1000_adapter *adapter = (struct e1000_adapter *) data;
     struct net_device *netdev = adapter->netdev;
     struct e1000_desc_ring *txdr = &adapter->tx_ring;
-    unsigned int i;
     uint32_t link;
     e1000_check_for_link(&adapter->hw);
@@ -1505,12 +1617,8 @@ e1000_watchdog(unsigned long data)
     /* Cause software interrupt to ensure rx ring is cleaned */
     E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
-    /* Early detection of hung controller */
-    i = txdr->next_to_clean;
-    if(txdr->buffer_info[i].dma &&
-       time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
-       !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
-        netif_stop_queue(netdev);
+    /* Force detection of hung controller every watchdog period*/
+    adapter->detect_tx_hung = TRUE;
     /* Reset the timer */
     mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
@@ -2151,10 +2259,28 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
         __netif_rx_schedule(netdev);
     }
 #else
+    /* Writing IMC and IMS is needed for 82547.
+       Due to Hub Link bus being occupied, an interrupt
+       de-assertion message is not able to be sent.
+       When an interrupt assertion message is generated later,
+       two messages are re-ordered and sent out.
+       That causes APIC to think 82547 is in de-assertion
+       state, while 82547 is in assertion state, resulting
+       in dead lock. Writing IMC forces 82547 into
+       de-assertion state.
+    */
+    if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
+        atomic_inc(&adapter->irq_sem);
+        E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+    }
     for(i = 0; i < E1000_MAX_INTR; i++)
         if(unlikely(!e1000_clean_rx_irq(adapter) &
                     !e1000_clean_tx_irq(adapter)))
             break;
+    if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+        e1000_irq_enable(adapter);
 #endif
     return IRQ_HANDLED;
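A note on the inlined masking above: adapter->irq_sem serves as a nesting count for interrupt masking, so the handler bumps the count and writes IMC itself (the masking half of e1000_irq_disable()), and the later e1000_irq_enable() call only unmasks the hardware once that count is balanced again; per the 5.7.1 changelog entry, calling the full e1000_irq_disable() from the handler is what had caused instability. Below is a rough sketch of the counted mask/unmask idea only, using made-up helper names rather than the driver's real register accessors.

    #include <stdatomic.h>

    /* Illustrative only: a nestable mask/unmask pair in the spirit of irq_sem.
     * mask_all()/unmask_all() stand in for the IMC/IMS register writes. */
    static atomic_int irq_sem;                 /* number of outstanding disables */

    static void mask_all(void)   { /* write IMC = ~0: mask every interrupt cause */ }
    static void unmask_all(void) { /* write IMS: unmask the causes in use */ }

    static void irq_disable(void)
    {
        atomic_fetch_add(&irq_sem, 1);         /* safe to nest */
        mask_all();
    }

    static void irq_enable(void)
    {
        /* only the enable that balances the last disable touches the chip */
        if (atomic_fetch_sub(&irq_sem, 1) == 1)
            unmask_all();
    }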
@@ -2174,24 +2300,21 @@ e1000_clean(struct net_device *netdev, int *budget)
     int tx_cleaned;
     int work_done = 0;
-    if (!netif_carrier_ok(netdev))
-        goto quit_polling;
     tx_cleaned = e1000_clean_tx_irq(adapter);
     e1000_clean_rx_irq(adapter, &work_done, work_to_do);
     *budget -= work_done;
     netdev->quota -= work_done;
-    /* if no Rx and Tx cleanup work was done, exit the polling mode */
-    if(!tx_cleaned || (work_done < work_to_do) ||
+    /* if no Tx and not enough Rx work done, exit the polling mode */
+    if((!tx_cleaned && (work_done < work_to_do)) ||
        !netif_running(netdev)) {
-quit_polling:    netif_rx_complete(netdev);
+        netif_rx_complete(netdev);
         e1000_irq_enable(adapter);
         return 0;
     }
-    return (work_done >= work_to_do);
+    return 1;
 }
 #endif
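The reworked exit test above is easy to misread at a glance, so here is the same logic restated outside the driver (nothing here beyond the two conditions shown): the old test left polling mode when either the Tx pass found nothing to clean or the Rx pass came in under budget, while the new test leaves only when both hold, which is the "when to quit NAPI" fix named in the 5.7.0 changelog entry.

    #include <stdbool.h>
    #include <stdio.h>

    /* Restatement of the two exit conditions from e1000_clean() above. */
    static bool quit_old(bool tx_cleaned, int done, int budget, bool running)
    {
        return !tx_cleaned || (done < budget) || !running;
    }

    static bool quit_new(bool tx_cleaned, int done, int budget, bool running)
    {
        return (!tx_cleaned && (done < budget)) || !running;
    }

    int main(void)
    {
        /* Tx work was cleaned, Rx finished under budget, interface running:
         * the old condition quits polling, the new one keeps polling. */
        printf("old=%d new=%d\n",
               quit_old(true, 10, 64, true),   /* old=1 */
               quit_new(true, 10, 64, true));  /* new=0 */
        return 0;
    }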
@@ -2215,11 +2338,34 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
     eop_desc = E1000_TX_DESC(*tx_ring, eop);
     while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+        /* pre-mature writeback of Tx descriptors */
+        /* clear (free buffers and unmap pci_mapping) */
+        /* previous_buffer_info */
+        if (likely(adapter->previous_buffer_info.skb != NULL)) {
+            e1000_unmap_and_free_tx_resource(adapter,
+                &adapter->previous_buffer_info);
+        }
         for(cleaned = FALSE; !cleaned; ) {
             tx_desc = E1000_TX_DESC(*tx_ring, i);
             buffer_info = &tx_ring->buffer_info[i];
+            cleaned = (i == eop);
+            /* pre-mature writeback of Tx descriptors */
+            /* save the cleaning of the this for the */
+            /* next iteration */
+            if (cleaned) {
+                memcpy(&adapter->previous_buffer_info,
+                    buffer_info,
+                    sizeof(struct e1000_buffer));
+                memset(buffer_info,
+                    0,
+                    sizeof(struct e1000_buffer));
+            } else {
+                e1000_unmap_and_free_tx_resource(adapter,
+                    buffer_info);
+            }
-            e1000_unmap_and_free_tx_resource(adapter, buffer_info);
             tx_desc->buffer_addr = 0;
             tx_desc->lower.data = 0;
             tx_desc->upper.data = 0;
@@ -2242,6 +2388,16 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
     spin_unlock(&adapter->tx_lock);
+    if(adapter->detect_tx_hung) {
+        /* detect a transmit hang in hardware, this serializes the
+         * check with the clearing of time_stamp and movement of i */
+        adapter->detect_tx_hung = FALSE;
+        if(tx_ring->buffer_info[i].dma &&
+           time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) &&
+           !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
+            netif_stop_queue(netdev);
+    }
     return cleaned;
 }
@@ -2407,19 +2563,43 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
     struct e1000_rx_desc *rx_desc;
     struct e1000_buffer *buffer_info;
     struct sk_buff *skb;
-    unsigned int i;
+    unsigned int i, bufsz;
     i = rx_ring->next_to_use;
     buffer_info = &rx_ring->buffer_info[i];
     while(!buffer_info->skb) {
-        skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+        bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+        skb = dev_alloc_skb(bufsz);
         if(unlikely(!skb)) {
             /* Better luck next round */
             break;
         }
+        /* fix for errata 23, cant cross 64kB boundary */
+        if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+            struct sk_buff *oldskb = skb;
+            DPRINTK(RX_ERR,ERR,
+                "skb align check failed: %u bytes at %p\n",
+                bufsz, skb->data);
+            /* try again, without freeing the previous */
+            skb = dev_alloc_skb(bufsz);
+            if (!skb) {
+                dev_kfree_skb(oldskb);
+                break;
+            }
+            if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+                /* give up */
+                dev_kfree_skb(skb);
+                dev_kfree_skb(oldskb);
+                break; /* while !buffer_info->skb */
+            } else {
+                /* move on with the new one */
+                dev_kfree_skb(oldskb);
+            }
+        }
         /* Make buffer alignment 2 beyond a 16 byte boundary
          * this will result in a 16 byte aligned IP header after
          * the 14 byte MAC header is removed
@@ -2435,6 +2615,25 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
             adapter->rx_buffer_len,
             PCI_DMA_FROMDEVICE);
+        /* fix for errata 23, cant cross 64kB boundary */
+        if(!e1000_check_64k_bound(adapter,
+                (void *)(unsigned long)buffer_info->dma,
+                adapter->rx_buffer_len)) {
+            DPRINTK(RX_ERR,ERR,
+                "dma align check failed: %u bytes at %ld\n",
+                adapter->rx_buffer_len, (unsigned long)buffer_info->dma);
+            dev_kfree_skb(skb);
+            buffer_info->skb = NULL;
+            pci_unmap_single(pdev,
+                buffer_info->dma,
+                adapter->rx_buffer_len,
+                PCI_DMA_FROMDEVICE);
+            break; /* while !buffer_info->skb */
+        }
         rx_desc = E1000_RX_DESC(*rx_ring, i);
         rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
...