Commit 6c722e90 authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (42 commits)
  r8169: extraneous Cmd{Tx/Rx}Enb write
  forcedeth: modified comment header
  NetXen: Reducing ring sizes for IOMMU issue.
  NetXen: Fix for PPC machines.
  NetXen: work queue fixes.
  NetXen: Link status message correction for quad port cards.
  NetXen: Multiple adapter fix.
  NetXen: Using correct CHECKSUM flag.
  NetXen: driver reload fix for newer firmware.
  NetXen: Adding new device ids.
  PHY probe not working properly for ibm_emac (PPC4xx)
  ep93xx: some minor cleanups to the ep93xx eth driver
  sky2: phy power down needs PCI config write enabled
  sky2: power management/MSI workaround
  sky2: dual port NAPI problem
  via-velocity uses INET interfaces
  e1000: Do not truncate TSO TCP header with 82544 workaround
  myri10ge: handle failures in suspend and resume
  myri10ge: no need to save MSI and PCIe state in the driver
  myri10ge: make msi configurable at runtime through sysfs
  ...
parents 007fb598 81f4e6c1
@@ -617,13 +617,15 @@ static int cp_rx_poll (struct net_device *dev, int *budget)
 	 * this round of polling
 	 */
 	if (rx_work) {
+		unsigned long flags;
+
 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
 			goto rx_status_loop;

-		local_irq_disable();
+		local_irq_save(flags);
 		cpw16_f(IntrMask, cp_intr_mask);
 		__netif_rx_complete(dev);
-		local_irq_enable();
+		local_irq_restore(flags);

 		return 0;	/* done */
 	}
...
@@ -780,12 +780,10 @@ static struct ethtool_ops ep93xx_ethtool_ops = {
 struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
 {
 	struct net_device *dev;
-	struct ep93xx_priv *ep;

 	dev = alloc_etherdev(sizeof(struct ep93xx_priv));
 	if (dev == NULL)
 		return NULL;
-	ep = netdev_priv(dev);

 	memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
@@ -840,9 +838,9 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
 	struct ep93xx_priv *ep;
 	int err;

-	data = pdev->dev.platform_data;
 	if (pdev == NULL)
 		return -ENODEV;
+	data = pdev->dev.platform_data;

 	dev = ep93xx_dev_alloc(data);
 	if (dev == NULL) {
...
@@ -879,12 +879,14 @@ static int b44_poll(struct net_device *netdev, int *budget)
 	}

 	if (bp->istat & ISTAT_ERRORS) {
-		spin_lock_irq(&bp->lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&bp->lock, flags);
 		b44_halt(bp);
 		b44_init_rings(bp);
 		b44_init_hw(bp, 1);
 		netif_wake_queue(bp->dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		done = 1;
 	}
...
@@ -100,6 +100,9 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
 	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
 	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
 };
 #define E1000_QUEUE_STATS_LEN 0
...
@@ -308,141 +308,160 @@ e1000_phy_init_script(struct e1000_hw *hw)
 int32_t
 e1000_set_mac_type(struct e1000_hw *hw)
 {
 	DEBUGFUNC("e1000_set_mac_type");

 	switch (hw->device_id) {
 	case E1000_DEV_ID_82542:
 		switch (hw->revision_id) {
 		case E1000_82542_2_0_REV_ID:
 			hw->mac_type = e1000_82542_rev2_0;
 			break;
 		case E1000_82542_2_1_REV_ID:
 			hw->mac_type = e1000_82542_rev2_1;
 			break;
 		default:
 			/* Invalid 82542 revision ID */
 			return -E1000_ERR_MAC_TYPE;
 		}
 		break;
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
 		hw->mac_type = e1000_82543;
 		break;
 	case E1000_DEV_ID_82544EI_COPPER:
 	case E1000_DEV_ID_82544EI_FIBER:
 	case E1000_DEV_ID_82544GC_COPPER:
 	case E1000_DEV_ID_82544GC_LOM:
 		hw->mac_type = e1000_82544;
 		break;
 	case E1000_DEV_ID_82540EM:
 	case E1000_DEV_ID_82540EM_LOM:
 	case E1000_DEV_ID_82540EP:
 	case E1000_DEV_ID_82540EP_LOM:
 	case E1000_DEV_ID_82540EP_LP:
 		hw->mac_type = e1000_82540;
 		break;
 	case E1000_DEV_ID_82545EM_COPPER:
 	case E1000_DEV_ID_82545EM_FIBER:
 		hw->mac_type = e1000_82545;
 		break;
 	case E1000_DEV_ID_82545GM_COPPER:
 	case E1000_DEV_ID_82545GM_FIBER:
 	case E1000_DEV_ID_82545GM_SERDES:
 		hw->mac_type = e1000_82545_rev_3;
 		break;
 	case E1000_DEV_ID_82546EB_COPPER:
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
 		hw->mac_type = e1000_82546;
 		break;
 	case E1000_DEV_ID_82546GB_COPPER:
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82546GB_SERDES:
 	case E1000_DEV_ID_82546GB_PCIE:
 	case E1000_DEV_ID_82546GB_QUAD_COPPER:
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 		hw->mac_type = e1000_82546_rev_3;
 		break;
 	case E1000_DEV_ID_82541EI:
 	case E1000_DEV_ID_82541EI_MOBILE:
 	case E1000_DEV_ID_82541ER_LOM:
 		hw->mac_type = e1000_82541;
 		break;
 	case E1000_DEV_ID_82541ER:
 	case E1000_DEV_ID_82541GI:
 	case E1000_DEV_ID_82541GI_LF:
 	case E1000_DEV_ID_82541GI_MOBILE:
 		hw->mac_type = e1000_82541_rev_2;
 		break;
 	case E1000_DEV_ID_82547EI:
 	case E1000_DEV_ID_82547EI_MOBILE:
 		hw->mac_type = e1000_82547;
 		break;
 	case E1000_DEV_ID_82547GI:
 		hw->mac_type = e1000_82547_rev_2;
 		break;
 	case E1000_DEV_ID_82571EB_COPPER:
 	case E1000_DEV_ID_82571EB_FIBER:
 	case E1000_DEV_ID_82571EB_SERDES:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
 		hw->mac_type = e1000_82571;
 		break;
 	case E1000_DEV_ID_82572EI_COPPER:
 	case E1000_DEV_ID_82572EI_FIBER:
 	case E1000_DEV_ID_82572EI_SERDES:
 	case E1000_DEV_ID_82572EI:
 		hw->mac_type = e1000_82572;
 		break;
 	case E1000_DEV_ID_82573E:
 	case E1000_DEV_ID_82573E_IAMT:
 	case E1000_DEV_ID_82573L:
 		hw->mac_type = e1000_82573;
 		break;
 	case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
 	case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
 	case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
 	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
 		hw->mac_type = e1000_80003es2lan;
 		break;
 	case E1000_DEV_ID_ICH8_IGP_M_AMT:
 	case E1000_DEV_ID_ICH8_IGP_AMT:
 	case E1000_DEV_ID_ICH8_IGP_C:
 	case E1000_DEV_ID_ICH8_IFE:
 	case E1000_DEV_ID_ICH8_IFE_GT:
 	case E1000_DEV_ID_ICH8_IFE_G:
 	case E1000_DEV_ID_ICH8_IGP_M:
 		hw->mac_type = e1000_ich8lan;
 		break;
 	default:
 		/* Should never have loaded on this device */
 		return -E1000_ERR_MAC_TYPE;
 	}
 	switch (hw->mac_type) {
 	case e1000_ich8lan:
 		hw->swfwhw_semaphore_present = TRUE;
 		hw->asf_firmware_present = TRUE;
 		break;
 	case e1000_80003es2lan:
 		hw->swfw_sync_present = TRUE;
 		/* fall through */
 	case e1000_82571:
 	case e1000_82572:
 	case e1000_82573:
 		hw->eeprom_semaphore_present = TRUE;
 		/* fall through */
 	case e1000_82541:
 	case e1000_82547:
 	case e1000_82541_rev_2:
 	case e1000_82547_rev_2:
 		hw->asf_firmware_present = TRUE;
 		break;
 	default:
 		break;
 	}

+	/* The 82543 chip does not count tx_carrier_errors properly in
+	 * FD mode
+	 */
+	if (hw->mac_type == e1000_82543)
+		hw->bad_tx_carr_stats_fd = TRUE;
+
+	/* capable of receiving management packets to the host */
+	if (hw->mac_type >= e1000_82571)
+		hw->has_manc2h = TRUE;
+
+	/* In rare occasions, ESB2 systems would end up started without
+	 * the RX unit being turned on.
+	 */
+	if (hw->mac_type == e1000_80003es2lan)
+		hw->rx_needs_kicking = TRUE;
+
+	if (hw->mac_type > e1000_82544)
+		hw->has_smbus = TRUE;
+
 	return E1000_SUCCESS;
 }

 /*****************************************************************************
@@ -6575,7 +6594,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
 	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
 	case e1000_82542_rev2_1:
-		hw->bus_type = e1000_bus_type_unknown;
+		hw->bus_type = e1000_bus_type_pci;
 		hw->bus_speed = e1000_bus_speed_unknown;
 		hw->bus_width = e1000_bus_width_unknown;
 		break;
@@ -7817,9 +7836,8 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw)
 		fwsm = E1000_READ_REG(hw, FWSM);
 		factps = E1000_READ_REG(hw, FACTPS);

-		if (((fwsm & E1000_FWSM_MODE_MASK) ==
-		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) &&
-		    (factps & E1000_FACTPS_MNGCG))
+		if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
+		     e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
 			return TRUE;
 	} else
 		if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
...
@@ -1301,165 +1301,170 @@ struct e1000_ffvt_entry {
 #define E1000_82542_RSSIR E1000_RSSIR
 #define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
 #define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
+#define E1000_82542_MANC2H E1000_MANC2H

 /* Statistics counters collected by the MAC */
 struct e1000_hw_stats {
 	uint64_t crcerrs;
 	uint64_t algnerrc;
 	uint64_t symerrs;
 	uint64_t rxerrc;
 	uint64_t txerrc;
 	uint64_t mpc;
 	uint64_t scc;
 	uint64_t ecol;
 	uint64_t mcc;
 	uint64_t latecol;
 	uint64_t colc;
 	uint64_t dc;
 	uint64_t tncrs;
 	uint64_t sec;
 	uint64_t cexterr;
 	uint64_t rlec;
 	uint64_t xonrxc;
 	uint64_t xontxc;
 	uint64_t xoffrxc;
 	uint64_t xofftxc;
 	uint64_t fcruc;
 	uint64_t prc64;
 	uint64_t prc127;
 	uint64_t prc255;
 	uint64_t prc511;
 	uint64_t prc1023;
 	uint64_t prc1522;
 	uint64_t gprc;
 	uint64_t bprc;
 	uint64_t mprc;
 	uint64_t gptc;
 	uint64_t gorcl;
 	uint64_t gorch;
 	uint64_t gotcl;
 	uint64_t gotch;
 	uint64_t rnbc;
 	uint64_t ruc;
 	uint64_t rfc;
 	uint64_t roc;
 	uint64_t rlerrc;
 	uint64_t rjc;
 	uint64_t mgprc;
 	uint64_t mgpdc;
 	uint64_t mgptc;
 	uint64_t torl;
 	uint64_t torh;
 	uint64_t totl;
 	uint64_t toth;
 	uint64_t tpr;
 	uint64_t tpt;
 	uint64_t ptc64;
 	uint64_t ptc127;
 	uint64_t ptc255;
 	uint64_t ptc511;
 	uint64_t ptc1023;
 	uint64_t ptc1522;
 	uint64_t mptc;
 	uint64_t bptc;
 	uint64_t tsctc;
 	uint64_t tsctfc;
 	uint64_t iac;
 	uint64_t icrxptc;
 	uint64_t icrxatc;
 	uint64_t ictxptc;
 	uint64_t ictxatc;
 	uint64_t ictxqec;
 	uint64_t ictxqmtc;
 	uint64_t icrxdmtc;
 	uint64_t icrxoc;
 };
 /* Structure containing variables used by the shared code (e1000_hw.c) */
 struct e1000_hw {
 	uint8_t __iomem *hw_addr;
 	uint8_t __iomem *flash_address;
 	e1000_mac_type mac_type;
 	e1000_phy_type phy_type;
 	uint32_t phy_init_script;
 	e1000_media_type media_type;
 	void *back;
 	struct e1000_shadow_ram *eeprom_shadow_ram;
 	uint32_t flash_bank_size;
 	uint32_t flash_base_addr;
 	e1000_fc_type fc;
 	e1000_bus_speed bus_speed;
 	e1000_bus_width bus_width;
 	e1000_bus_type bus_type;
 	struct e1000_eeprom_info eeprom;
 	e1000_ms_type master_slave;
 	e1000_ms_type original_master_slave;
 	e1000_ffe_config ffe_config_state;
 	uint32_t asf_firmware_present;
 	uint32_t eeprom_semaphore_present;
 	uint32_t swfw_sync_present;
 	uint32_t swfwhw_semaphore_present;
 	unsigned long io_base;
 	uint32_t phy_id;
 	uint32_t phy_revision;
 	uint32_t phy_addr;
 	uint32_t original_fc;
 	uint32_t txcw;
 	uint32_t autoneg_failed;
 	uint32_t max_frame_size;
 	uint32_t min_frame_size;
 	uint32_t mc_filter_type;
 	uint32_t num_mc_addrs;
 	uint32_t collision_delta;
 	uint32_t tx_packet_delta;
 	uint32_t ledctl_default;
 	uint32_t ledctl_mode1;
 	uint32_t ledctl_mode2;
 	boolean_t tx_pkt_filtering;
 	struct e1000_host_mng_dhcp_cookie mng_cookie;
 	uint16_t phy_spd_default;
 	uint16_t autoneg_advertised;
 	uint16_t pci_cmd_word;
 	uint16_t fc_high_water;
 	uint16_t fc_low_water;
 	uint16_t fc_pause_time;
 	uint16_t current_ifs_val;
 	uint16_t ifs_min_val;
 	uint16_t ifs_max_val;
 	uint16_t ifs_step_size;
 	uint16_t ifs_ratio;
 	uint16_t device_id;
 	uint16_t vendor_id;
 	uint16_t subsystem_id;
 	uint16_t subsystem_vendor_id;
 	uint8_t revision_id;
 	uint8_t autoneg;
 	uint8_t mdix;
 	uint8_t forced_speed_duplex;
 	uint8_t wait_autoneg_complete;
 	uint8_t dma_fairness;
 	uint8_t mac_addr[NODE_ADDRESS_SIZE];
 	uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
 	boolean_t disable_polarity_correction;
 	boolean_t speed_downgraded;
 	e1000_smart_speed smart_speed;
 	e1000_dsp_config dsp_config_state;
 	boolean_t get_link_status;
 	boolean_t serdes_link_down;
 	boolean_t tbi_compatibility_en;
 	boolean_t tbi_compatibility_on;
 	boolean_t laa_is_present;
 	boolean_t phy_reset_disable;
 	boolean_t initialize_hw_bits_disable;
 	boolean_t fc_send_xon;
 	boolean_t fc_strict_ieee;
 	boolean_t report_tx_early;
 	boolean_t adaptive_ifs;
 	boolean_t ifs_params_forced;
 	boolean_t in_ifs_mode;
 	boolean_t mng_reg_access_disabled;
 	boolean_t leave_av_bit_off;
 	boolean_t kmrn_lock_loss_workaround_disabled;
+	boolean_t bad_tx_carr_stats_fd;
+	boolean_t has_manc2h;
+	boolean_t rx_needs_kicking;
+	boolean_t has_smbus;
 };
@@ -2418,6 +2423,7 @@ struct e1000_host_command_info {
 #define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
 #define E1000_PBA_12K 0x000C   /* 12KB, default Rx allocation */
 #define E1000_PBA_16K 0x0010   /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
 #define E1000_PBA_22K 0x0016
 #define E1000_PBA_24K 0x0018
 #define E1000_PBA_30K 0x001E
...
@@ -487,7 +487,9 @@ e1000_check_options(struct e1000_adapter *adapter)
 			e1000_validate_option(&adapter->itr, &opt,
 				adapter);
 			/* save the setting, because the dynamic bits change itr */
-			adapter->itr_setting = adapter->itr;
+			/* clear the lower two bits because they are
+			 * used as control */
+			adapter->itr_setting = adapter->itr & ~3;
 			break;
 		}
 	} else {
...
@@ -3,8 +3,7 @@
  *
  * Note: This driver is a cleanroom reimplementation based on reverse
  * engineered documentation written by Carl-Daniel Hailfinger
- * and Andrew de Quincey. It's neither supported nor endorsed
- * by NVIDIA Corp. Use at your own risk.
+ * and Andrew de Quincey.
  *
  * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
  * trademarks of NVIDIA Corporation in the United States and other
@@ -14,7 +13,7 @@
  * Copyright (C) 2004 Andrew de Quincey (wol support)
  * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
  *		IRQ rate fixes, bigendian fixes, cleanups, verification)
- * Copyright (c) 2004 NVIDIA Corporation
+ * Copyright (c) 2004,5,6 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -2576,14 +2575,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 	int pkts, limit = min(*budget, dev->quota);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	unsigned long flags;

 	pkts = nv_rx_process(dev, limit);

 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 	}

 	if (pkts < limit) {
@@ -2591,13 +2591,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
 		netif_rx_complete(dev);

 		/* re-enable receive interrupts */
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		np->irqmask |= NVREG_IRQ_RX_ALL;
 		if (np->msi_flags & NV_MSI_X_ENABLED)
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 		else
 			writel(np->irqmask, base + NvRegIrqMask);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 		return 0;
 	} else {
 		/* used up our quantum, so reschedule */
...
@@ -309,7 +309,7 @@ int mii_phy_probe(struct mii_phy *phy, int address)
 {
 	struct mii_phy_def *def;
 	int i;
-	u32 id;
+	int id;

 	phy->autoneg = AUTONEG_DISABLE;
 	phy->advertising = 0;
@@ -324,6 +324,8 @@ int mii_phy_probe(struct mii_phy *phy, int address)
 	/* Read ID and find matching entry */
 	id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
+	if (id < 0)
+		return -ENODEV;
 	for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
 		if ((id & def->phy_id_mask) == def->phy_id)
 			break;
...
@@ -199,8 +199,6 @@ struct myri10ge_priv {
 	unsigned long serial_number;
 	int vendor_specific_offset;
 	int fw_multicast_support;
-	u32 devctl;
-	u16 msi_flags;
 	u32 read_dma;
 	u32 write_dma;
 	u32 read_write_dma;
@@ -228,7 +226,7 @@ module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n");

 static int myri10ge_msi = 1;	/* enable msi by default */
-module_param(myri10ge_msi, int, S_IRUGO);
+module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n");

 static int myri10ge_intr_coal_delay = 25;
@@ -721,12 +719,10 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 	status |=
 	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
 	mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
-	if (!mgp->msi_enabled) {
-		status |= myri10ge_send_cmd
-		    (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0);
-		mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
-	}
+	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
+				    &cmd, 0);
+	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
 	status |= myri10ge_send_cmd
 	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
 	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
@@ -1619,6 +1615,41 @@ static void myri10ge_free_rings(struct net_device *dev)
 	mgp->tx.req_list = NULL;
 }

+static int myri10ge_request_irq(struct myri10ge_priv *mgp)
+{
+	struct pci_dev *pdev = mgp->pdev;
+	int status;
+
+	if (myri10ge_msi) {
+		status = pci_enable_msi(pdev);
+		if (status != 0)
+			dev_err(&pdev->dev,
+				"Error %d setting up MSI; falling back to xPIC\n",
+				status);
+		else
+			mgp->msi_enabled = 1;
+	} else {
+		mgp->msi_enabled = 0;
+	}
+	status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
+			     mgp->dev->name, mgp);
+	if (status != 0) {
+		dev_err(&pdev->dev, "failed to allocate IRQ\n");
+		if (mgp->msi_enabled)
+			pci_disable_msi(pdev);
+	}
+	return status;
+}
+
+static void myri10ge_free_irq(struct myri10ge_priv *mgp)
+{
+	struct pci_dev *pdev = mgp->pdev;
+
+	free_irq(pdev->irq, mgp);
+	if (mgp->msi_enabled)
+		pci_disable_msi(pdev);
+}
+
 static int myri10ge_open(struct net_device *dev)
 {
 	struct myri10ge_priv *mgp;
@@ -1634,10 +1665,13 @@ static int myri10ge_open(struct net_device *dev)
 	status = myri10ge_reset(mgp);
 	if (status != 0) {
 		printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name);
-		mgp->running = MYRI10GE_ETH_STOPPED;
-		return -ENXIO;
+		goto abort_with_nothing;
 	}

+	status = myri10ge_request_irq(mgp);
+	if (status != 0)
+		goto abort_with_nothing;
+
 	/* decide what small buffer size to use.  For good TCP rx
 	 * performance, it is important to not receive 1514 byte
 	 * frames into jumbo buffers, as it confuses the socket buffer
@@ -1677,7 +1711,7 @@ static int myri10ge_open(struct net_device *dev)
 		       "myri10ge: %s: failed to get ring sizes or locations\n",
 		       dev->name);
 		mgp->running = MYRI10GE_ETH_STOPPED;
-		return -ENXIO;
+		goto abort_with_irq;
 	}

 	if (mgp->mtrr >= 0) {
@@ -1708,7 +1742,7 @@ static int myri10ge_open(struct net_device *dev)
 	status = myri10ge_allocate_rings(dev);
 	if (status != 0)
-		goto abort_with_nothing;
+		goto abort_with_irq;

 	/* now give firmware buffers sizes, and MTU */
 	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
@@ -1771,6 +1805,9 @@ static int myri10ge_open(struct net_device *dev)
 abort_with_rings:
 	myri10ge_free_rings(dev);

+abort_with_irq:
+	myri10ge_free_irq(mgp);
+
 abort_with_nothing:
 	mgp->running = MYRI10GE_ETH_STOPPED;
 	return -ENOMEM;
@@ -1807,7 +1844,7 @@ static int myri10ge_close(struct net_device *dev)
 		printk(KERN_ERR "myri10ge: %s never got down irq\n", dev->name);

 	netif_tx_disable(dev);
+	myri10ge_free_irq(mgp);
 	myri10ge_free_rings(dev);

 	mgp->running = MYRI10GE_ETH_STOPPED;
@@ -2481,34 +2518,6 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
 	}
 }

-static void myri10ge_save_state(struct myri10ge_priv *mgp)
-{
-	struct pci_dev *pdev = mgp->pdev;
-	int cap;
-
-	pci_save_state(pdev);
-	/* now save PCIe and MSI state that Linux will not
-	 * save for us */
-	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &mgp->devctl);
-	cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &mgp->msi_flags);
-}
-
-static void myri10ge_restore_state(struct myri10ge_priv *mgp)
-{
-	struct pci_dev *pdev = mgp->pdev;
-	int cap;
-
-	/* restore PCIe and MSI state that linux will not */
-	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	pci_write_config_dword(pdev, cap + PCI_CAP_ID_EXP, mgp->devctl);
-	cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	pci_write_config_word(pdev, cap + PCI_MSI_FLAGS, mgp->msi_flags);
-	pci_restore_state(pdev);
-}
-
 #ifdef CONFIG_PM

 static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -2529,11 +2538,10 @@ static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
 		rtnl_unlock();
 	}
 	myri10ge_dummy_rdma(mgp, 0);
-	free_irq(pdev->irq, mgp);
-	myri10ge_save_state(mgp);
+	pci_save_state(pdev);
 	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));

-	return 0;
+	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
 }

 static int myri10ge_resume(struct pci_dev *pdev)
@@ -2555,34 +2563,33 @@ static int myri10ge_resume(struct pci_dev *pdev)
 			mgp->dev->name);
 		return -EIO;
 	}
-	myri10ge_restore_state(mgp);
+	status = pci_restore_state(pdev);
+	if (status)
+		return status;
+
 	status = pci_enable_device(pdev);
-	if (status < 0) {
+	if (status) {
 		dev_err(&pdev->dev, "failed to enable device\n");
-		return -EIO;
+		return status;
 	}

 	pci_set_master(pdev);

-	status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
-			     netdev->name, mgp);
-	if (status != 0) {
-		dev_err(&pdev->dev, "failed to allocate IRQ\n");
-		goto abort_with_enabled;
-	}
-
 	myri10ge_reset(mgp);
 	myri10ge_dummy_rdma(mgp, 1);

 	/* Save configuration space to be restored if the
 	 * nic resets due to a parity error */
-	myri10ge_save_state(mgp);
+	pci_save_state(pdev);

 	if (netif_running(netdev)) {
 		rtnl_lock();
-		myri10ge_open(netdev);
+		status = myri10ge_open(netdev);
 		rtnl_unlock();
+
+		if (status != 0)
+			goto abort_with_enabled;
 	}
 	netif_device_attach(netdev);
@@ -2640,7 +2647,11 @@ static void myri10ge_watchdog(struct work_struct *work)
 		 * when the driver was loaded, or the last time the
 		 * nic was resumed from power saving mode.
 		 */
-		myri10ge_restore_state(mgp);
+		pci_restore_state(mgp->pdev);
+
+		/* save state again for accounting reasons */
+		pci_save_state(mgp->pdev);
+
 	} else {
 		/* if we get back -1's from our slot, perhaps somebody
 		 * powered off our card. Don't try to reset it in
...
@@ -2856,23 +2867,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto abort_with_firmware;
 	}

-	if (myri10ge_msi) {
-		status = pci_enable_msi(pdev);
-		if (status != 0)
-			dev_err(&pdev->dev,
-				"Error %d setting up MSI; falling back to xPIC\n",
-				status);
-		else
-			mgp->msi_enabled = 1;
-	}
-
-	status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
-			     netdev->name, mgp);
-	if (status != 0) {
-		dev_err(&pdev->dev, "failed to allocate IRQ\n");
-		goto abort_with_firmware;
-	}
-
 	pci_set_drvdata(pdev, mgp);
 	if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
 		myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
@@ -2896,7 +2890,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

 	/* Save configuration space to be restored if the
 	 * nic resets due to a parity error */
-	myri10ge_save_state(mgp);
+	pci_save_state(pdev);

 	/* Setup the watchdog timer */
 	setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
@@ -2907,19 +2901,16 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	status = register_netdev(netdev);
 	if (status != 0) {
 		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
-		goto abort_with_irq;
+		goto abort_with_state;
 	}
-	dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
-		 (mgp->msi_enabled ? "MSI" : "xPIC"),
+	dev_info(dev, "%d, tx bndry %d, fw %s, WC %s\n",
 		 pdev->irq, mgp->tx.boundary, mgp->fw_name,
 		 (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));

 	return 0;

-abort_with_irq:
-	free_irq(pdev->irq, mgp);
-	if (mgp->msi_enabled)
-		pci_disable_msi(pdev);
+abort_with_state:
+	pci_restore_state(pdev);

 abort_with_firmware:
 	myri10ge_dummy_rdma(mgp, 0);
@@ -2970,12 +2961,12 @@ static void myri10ge_remove(struct pci_dev *pdev)
 	flush_scheduled_work();
 	netdev = mgp->dev;
 	unregister_netdev(netdev);
-	free_irq(pdev->irq, mgp);
-	if (mgp->msi_enabled)
-		pci_disable_msi(pdev);
 	myri10ge_dummy_rdma(mgp, 0);

+	/* avoid a memory leak */
+	pci_restore_state(pdev);
+
 	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
 	dma_free_coherent(&pdev->dev, bytes,
 			  mgp->rx_done.entry, mgp->rx_done.bus);
...
@@ -63,7 +63,7 @@
 #include "netxen_nic_hw.h"

-#define NETXEN_NIC_BUILD_NO "1"
+#define NETXEN_NIC_BUILD_NO "4"
 #define _NETXEN_NIC_LINUX_MAJOR 3
 #define _NETXEN_NIC_LINUX_MINOR 3
 #define _NETXEN_NIC_LINUX_SUBVERSION 2
@@ -137,7 +137,7 @@ extern struct workqueue_struct *netxen_workq;
 #define THIRD_PAGE_GROUP_SIZE	THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START

 #define MAX_RX_BUFFER_LENGTH		1760
-#define MAX_RX_JUMBO_BUFFER_LENGTH	9046
+#define MAX_RX_JUMBO_BUFFER_LENGTH	8062
 #define MAX_RX_LRO_BUFFER_LENGTH	((48*1024)-512)
 #define RX_DMA_MAP_LEN			(MAX_RX_BUFFER_LENGTH - 2)
 #define RX_JUMBO_DMA_MAP_LEN	\
@@ -199,9 +199,9 @@ enum {
 	(RCV_DESC_NORMAL)))

 #define MAX_CMD_DESCRIPTORS		1024
-#define MAX_RCV_DESCRIPTORS		32768
-#define MAX_JUMBO_RCV_DESCRIPTORS	4096
-#define MAX_LRO_RCV_DESCRIPTORS		2048
+#define MAX_RCV_DESCRIPTORS		16384
+#define MAX_JUMBO_RCV_DESCRIPTORS	1024
+#define MAX_LRO_RCV_DESCRIPTORS		64
 #define MAX_RCVSTATUS_DESCRIPTORS	MAX_RCV_DESCRIPTORS
 #define MAX_JUMBO_RCV_DESC	MAX_JUMBO_RCV_DESCRIPTORS
 #define MAX_RCV_DESC		MAX_RCV_DESCRIPTORS
@@ -852,8 +852,6 @@ struct netxen_adapter {
 	spinlock_t tx_lock;
 	spinlock_t lock;
 	struct work_struct watchdog_task;
-	struct work_struct tx_timeout_task;
-	struct net_device *netdev;
 	struct timer_list watchdog_timer;

 	u32 curr_window;
@@ -887,7 +885,6 @@ struct netxen_adapter {
 	struct netxen_recv_context recv_ctx[MAX_RCV_CTX];

 	int is_up;
-	int number;
 	struct netxen_dummy_dma dummy_dma;

 	/* Context interface shared between card and host */
@@ -950,6 +947,7 @@ struct netxen_port {
 	struct pci_dev *pdev;
 	struct net_device_stats net_stats;
 	struct netxen_port_stats stats;
+	struct work_struct tx_timeout_task;
 };

 #define PCI_OFFSET_FIRST_RANGE(adapter, off)	\
...
@@ -376,7 +376,7 @@ void netxen_tso_check(struct netxen_adapter *adapter,
 		    ((skb->nh.iph)->ihl * sizeof(u32)) +
 		    ((skb->h.th)->doff * sizeof(u32));
 		netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
-	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (skb->nh.iph->protocol == IPPROTO_TCP) {
 			netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
 		} else if (skb->nh.iph->protocol == IPPROTO_UDP) {
...
@@ -927,7 +927,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 		}
 		netxen_process_rcv(adapter, ctxid, desc);
 		netxen_clear_sts_owner(desc);
-		netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
+		netxen_set_sts_owner(desc, cpu_to_le16(STATUS_OWNER_PHANTOM));
 		consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
 		count++;
 	}
@@ -1022,7 +1022,7 @@ int netxen_process_cmd_ring(unsigned long data)
 		     && netif_carrier_ok(port->netdev))
 		    && ((jiffies - port->netdev->trans_start) >
 			port->netdev->watchdog_timeo)) {
-			SCHEDULE_WORK(&port->adapter->tx_timeout_task);
+			SCHEDULE_WORK(&port->tx_timeout_task);
 		}

 		last_consumer = get_next_index(last_consumer,
@@ -1137,13 +1137,13 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 		 */
 		dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
 				     PCI_DMA_FROMDEVICE);
-		pdesc->addr_buffer = dma;
+		pdesc->addr_buffer = cpu_to_le64(dma);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = dma;

 		/* make a rcv descriptor */
-		pdesc->reference_handle = buffer->ref_handle;
-		pdesc->buffer_length = rcv_desc->dma_size;
+		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+		pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
 		DPRINTK(INFO, "done writing descripter\n");
 		producer =
 		    get_next_index(producer, rcv_desc->max_rx_desc_count);
@@ -1231,8 +1231,8 @@ void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ctx,
 				     PCI_DMA_FROMDEVICE);

 		/* make a rcv descriptor */
-		pdesc->reference_handle = le16_to_cpu(buffer->ref_handle);
-		pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size);
+		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+		pdesc->buffer_length = cpu_to_le16(rcv_desc->dma_size);
 		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
 		DPRINTK(INFO, "done writing descripter\n");
 		producer =
...
@@ -157,7 +157,8 @@ void netxen_nic_isr_other(struct netxen_adapter *adapter)
 	for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) {
 		linkup = val & 1;
 		if (linkup != (qg_linksup & 1)) {
-			printk(KERN_INFO "%s: PORT %d link %s\n",
+			printk(KERN_INFO "%s: %s PORT %d link %s\n",
+			       adapter->port[portno]->netdev->name,
 			       netxen_nic_driver_name, portno,
 			       ((linkup == 0) ? "down" : "up"));
 			netxen_indicate_link_status(adapter, portno, linkup);
...
@@ -52,8 +52,6 @@ char netxen_nic_driver_name[] = "netxen-nic";
 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
     NETXEN_NIC_LINUX_VERSIONID;

-struct netxen_adapter *g_adapter = NULL;
-
 #define NETXEN_NETDEV_WEIGHT 120
 #define NETXEN_ADAPTER_UP_MAGIC 777
 #define NETXEN_NIC_PEG_TUNE 0
@@ -87,6 +85,8 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
 	{PCI_DEVICE(0x4040, 0x0003)},
 	{PCI_DEVICE(0x4040, 0x0004)},
 	{PCI_DEVICE(0x4040, 0x0005)},
+	{PCI_DEVICE(0x4040, 0x0024)},
+	{PCI_DEVICE(0x4040, 0x0025)},
 	{0,}
 };
@@ -126,7 +126,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct netxen_cmd_buffer *cmd_buf_arr = NULL;
 	u64 mac_addr[FLASH_NUM_PORTS + 1];
 	int valid_mac = 0;
-	static int netxen_cards_found = 0;

 	printk(KERN_INFO "%s \n", netxen_nic_driver_string);
 	/* In current scheme, we use only PCI function 0 */
@@ -217,9 +216,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_dbunmap;
 	}

-	if (netxen_cards_found == 0) {
-		g_adapter = adapter;
-	}
 	adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
 	adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
 	adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
@@ -424,8 +420,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			       netdev->dev_addr);
 		}
 	}
-	adapter->netdev = netdev;
-	INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
+	INIT_WORK(&port->tx_timeout_task, netxen_tx_timeout_task);

 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
@@ -440,6 +435,11 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		adapter->port[i] = port;
 	}

+	writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+	netxen_pinit_from_rom(adapter, 0);
+	udelay(500);
+	netxen_load_firmware(adapter);
+	netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
 	/*
 	 * delay a while to ensure that the Pegs are up & running.
 	 * Otherwise, we might see some flaky behaviour.
@@ -457,7 +457,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		break;
 	}

-	adapter->number = netxen_cards_found;
 	adapter->driver_mismatch = 0;

 	return 0;
@@ -527,6 +526,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 	netxen_nic_stop_all_ports(adapter);
 	/* leave the hw in the same state as reboot */
+	netxen_pinit_from_rom(adapter, 0);
+	writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
 	netxen_load_firmware(adapter);
 	netxen_free_adapter_offload(adapter);
@@ -817,8 +818,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	/* Take skb->data itself */
 	pbuf = &adapter->cmd_buf_arr[producer];
 	if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) {
-		pbuf->mss = skb_shinfo(skb)->gso_size;
-		hwdesc->mss = skb_shinfo(skb)->gso_size;
+		pbuf->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 	} else {
 		pbuf->mss = 0;
 		hwdesc->mss = 0;
@@ -952,11 +953,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 static void netxen_watchdog(unsigned long v)
 {
 	struct netxen_adapter *adapter = (struct netxen_adapter *)v;
-	if (adapter != g_adapter) {
-		printk("%s: ***BUG*** adapter[%p] != g_adapter[%p]\n",
-		       __FUNCTION__, adapter, g_adapter);
-		return;
-	}

 	SCHEDULE_WORK(&adapter->watchdog_task);
 }
@@ -965,23 +961,23 @@ static void netxen_tx_timeout(struct net_device *netdev)
 {
 	struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);

-	SCHEDULE_WORK(&port->adapter->tx_timeout_task);
+	SCHEDULE_WORK(&port->tx_timeout_task);
 }

 static void netxen_tx_timeout_task(struct work_struct *work)
 {
-	struct netxen_adapter *adapter =
-		container_of(work, struct netxen_adapter, tx_timeout_task);
-	struct net_device *netdev = adapter->netdev;
+	struct netxen_port *port =
+		container_of(work, struct netxen_port, tx_timeout_task);
+	struct net_device *netdev = port->netdev;
 	unsigned long flags;

 	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
 	       netxen_nic_driver_name, netdev->name);

-	spin_lock_irqsave(&adapter->lock, flags);
+	spin_lock_irqsave(&port->adapter->lock, flags);
 	netxen_nic_close(netdev);
 	netxen_nic_open(netdev);
-	spin_unlock_irqrestore(&adapter->lock, flags);
+	spin_unlock_irqrestore(&port->adapter->lock, flags);
 	netdev->trans_start = jiffies;
 	netif_wake_queue(netdev);
 }
...
@@ -225,7 +225,6 @@ MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 static int rx_copybreak = 200;
 static int use_dac;
-static int ignore_parity_err;
 static struct {
 	u32 msg_enable;
 } debug = { -1 };
@@ -471,8 +470,6 @@ module_param(use_dac, int, 0);
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
-module_param_named(ignore_parity_err, ignore_parity_err, bool, 0);
-MODULE_PARM_DESC(ignore_parity_err, "Ignore PCI parity error as target. Default: false");

 MODULE_LICENSE("GPL");
 MODULE_VERSION(RTL8169_VERSION);
@@ -1885,7 +1882,6 @@ static void rtl8169_hw_start(struct net_device *dev)
 	    (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
 	    (tp->mac_version == RTL_GIGA_MAC_VER_04))
-		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
 		rtl8169_set_rx_tx_config_registers(tp);

 	cmd = RTL_R16(CPlusCmd);
@@ -2388,7 +2384,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 	 *
 	 * Feel free to adjust to your needs.
 	 */
-	if (ignore_parity_err)
+	if (pdev->broken_parity_status)
 		pci_cmd &= ~PCI_COMMAND_PARITY;
 	else
 		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
...
...@@ -2920,6 +2920,7 @@ static int skge_poll(struct net_device *dev, int *budget) ...@@ -2920,6 +2920,7 @@ static int skge_poll(struct net_device *dev, int *budget)
struct skge_hw *hw = skge->hw; struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring; struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e; struct skge_element *e;
unsigned long flags;
int to_do = min(dev->quota, *budget); int to_do = min(dev->quota, *budget);
int work_done = 0; int work_done = 0;
...@@ -2957,12 +2958,12 @@ static int skge_poll(struct net_device *dev, int *budget) ...@@ -2957,12 +2958,12 @@ static int skge_poll(struct net_device *dev, int *budget)
if (work_done >= to_do) if (work_done >= to_do)
return 1; /* not done */ return 1; /* not done */
spin_lock_irq(&hw->hw_lock); spin_lock_irqsave(&hw->hw_lock, flags);
__netif_rx_complete(dev); __netif_rx_complete(dev);
hw->intr_mask |= irqmask[skge->port]; hw->intr_mask |= irqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask); skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK); skge_read32(hw, B0_IMSK);
spin_unlock_irq(&hw->hw_lock); spin_unlock_irqrestore(&hw->hw_lock, flags);
return 0; return 0;
} }
......
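The skge hunk above switches the poll-completion path from spin_lock_irq() to spin_lock_irqsave(), because ->poll() may be entered with interrupts already disabled (for example via netpoll), and unconditionally re-enabling them on exit would be wrong. A minimal sketch of the resulting pattern in an old-style (budget-pointer) NAPI poll routine, with hypothetical helpers foo_rx_clean() and foo_unmask_rx_irq():

struct foo_priv {
        spinlock_t lock;
};

static int foo_rx_clean(struct foo_priv *fp, int limit);    /* hypothetical RX cleanup */
static void foo_unmask_rx_irq(struct foo_priv *fp);         /* hypothetical: re-arm RX interrupt */

static int foo_poll(struct net_device *dev, int *budget)
{
        struct foo_priv *fp = netdev_priv(dev);
        int to_do = min(dev->quota, *budget);
        unsigned long flags;
        int work_done;

        work_done = foo_rx_clean(fp, to_do);
        *budget -= work_done;
        dev->quota -= work_done;

        if (work_done >= to_do)
                return 1;                       /* budget exhausted, poll again */

        /* The caller's IRQ state is unknown, so save and restore it
         * rather than assuming interrupts were enabled on entry. */
        spin_lock_irqsave(&fp->lock, flags);
        __netif_rx_complete(dev);
        foo_unmask_rx_irq(fp);
        spin_unlock_irqrestore(&fp->lock, flags);

        return 0;                               /* done */
}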
...@@ -569,8 +569,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) ...@@ -569,8 +569,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
onoff = !onoff; onoff = !onoff;
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
if (onoff) if (onoff)
/* Turn off phy power saving */ /* Turn off phy power saving */
reg1 &= ~phy_power[port]; reg1 &= ~phy_power[port];
...@@ -579,6 +579,7 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) ...@@ -579,6 +579,7 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
sky2_pci_write32(hw, PCI_DEV_REG1, reg1); sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
sky2_pci_read32(hw, PCI_DEV_REG1); sky2_pci_read32(hw, PCI_DEV_REG1);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
udelay(100); udelay(100);
} }
...@@ -1511,6 +1512,13 @@ static int sky2_down(struct net_device *dev) ...@@ -1511,6 +1512,13 @@ static int sky2_down(struct net_device *dev)
imask &= ~portirq_msk[port]; imask &= ~portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask); sky2_write32(hw, B0_IMSK, imask);
/*
* Both ports share the NAPI poll on port 0, so if necessary undo the
* the disable that is done in dev_close.
*/
if (sky2->port == 0 && hw->ports > 1)
netif_poll_enable(dev);
sky2_gmac_reset(hw, port); sky2_gmac_reset(hw, port);
/* Stop transmitter */ /* Stop transmitter */
...@@ -3631,6 +3639,29 @@ static int sky2_resume(struct pci_dev *pdev) ...@@ -3631,6 +3639,29 @@ static int sky2_resume(struct pci_dev *pdev)
out: out:
return err; return err;
} }
/* BIOS resume runs after device (it's a bug in PM)
* as a temporary workaround on suspend/resume leave MSI disabled
*/
static int sky2_suspend_late(struct pci_dev *pdev, pm_message_t state)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
free_irq(pdev->irq, hw);
if (hw->msi) {
pci_disable_msi(pdev);
hw->msi = 0;
}
return 0;
}
static int sky2_resume_early(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
struct net_device *dev = hw->dev[0];
return request_irq(pdev->irq, sky2_intr, IRQF_SHARED, dev->name, hw);
}
#endif #endif
static struct pci_driver sky2_driver = { static struct pci_driver sky2_driver = {
...@@ -3641,6 +3672,8 @@ static struct pci_driver sky2_driver = { ...@@ -3641,6 +3672,8 @@ static struct pci_driver sky2_driver = {
#ifdef CONFIG_PM #ifdef CONFIG_PM
.suspend = sky2_suspend, .suspend = sky2_suspend,
.resume = sky2_resume, .resume = sky2_resume,
.suspend_late = sky2_suspend_late,
.resume_early = sky2_resume_early,
#endif #endif
}; };
......
...@@ -265,15 +265,19 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status); ...@@ -265,15 +265,19 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
static int velocity_suspend(struct pci_dev *pdev, pm_message_t state); static int velocity_suspend(struct pci_dev *pdev, pm_message_t state);
static int velocity_resume(struct pci_dev *pdev); static int velocity_resume(struct pci_dev *pdev);
static DEFINE_SPINLOCK(velocity_dev_list_lock);
static LIST_HEAD(velocity_dev_list);
#endif
#if defined(CONFIG_PM) && defined(CONFIG_INET)
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr); static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);
static struct notifier_block velocity_inetaddr_notifier = { static struct notifier_block velocity_inetaddr_notifier = {
.notifier_call = velocity_netdev_event, .notifier_call = velocity_netdev_event,
}; };
static DEFINE_SPINLOCK(velocity_dev_list_lock);
static LIST_HEAD(velocity_dev_list);
static void velocity_register_notifier(void) static void velocity_register_notifier(void)
{ {
register_inetaddr_notifier(&velocity_inetaddr_notifier); register_inetaddr_notifier(&velocity_inetaddr_notifier);
...@@ -284,12 +288,12 @@ static void velocity_unregister_notifier(void) ...@@ -284,12 +288,12 @@ static void velocity_unregister_notifier(void)
unregister_inetaddr_notifier(&velocity_inetaddr_notifier); unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
} }
#else /* CONFIG_PM */ #else
#define velocity_register_notifier() do {} while (0) #define velocity_register_notifier() do {} while (0)
#define velocity_unregister_notifier() do {} while (0) #define velocity_unregister_notifier() do {} while (0)
#endif /* !CONFIG_PM */ #endif
/* /*
* Internal board variants. At the moment we have only one * Internal board variants. At the moment we have only one
...@@ -3292,6 +3296,8 @@ static int velocity_resume(struct pci_dev *pdev) ...@@ -3292,6 +3296,8 @@ static int velocity_resume(struct pci_dev *pdev)
return 0; return 0;
} }
#ifdef CONFIG_INET
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{ {
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
...@@ -3312,4 +3318,6 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi ...@@ -3312,4 +3318,6 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
} }
return NOTIFY_DONE; return NOTIFY_DONE;
} }
#endif
#endif #endif
...@@ -41,6 +41,8 @@ static void housekeeping_disable(struct zd_mac *mac); ...@@ -41,6 +41,8 @@ static void housekeeping_disable(struct zd_mac *mac);
static void set_multicast_hash_handler(struct work_struct *work); static void set_multicast_hash_handler(struct work_struct *work);
static void do_rx(unsigned long mac_ptr);
int zd_mac_init(struct zd_mac *mac, int zd_mac_init(struct zd_mac *mac,
struct net_device *netdev, struct net_device *netdev,
struct usb_interface *intf) struct usb_interface *intf)
...@@ -53,6 +55,10 @@ int zd_mac_init(struct zd_mac *mac, ...@@ -53,6 +55,10 @@ int zd_mac_init(struct zd_mac *mac,
INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work); INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work); INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
skb_queue_head_init(&mac->rx_queue);
tasklet_init(&mac->rx_tasklet, do_rx, (unsigned long)mac);
tasklet_disable(&mac->rx_tasklet);
ieee_init(ieee); ieee_init(ieee);
softmac_init(ieee80211_priv(netdev)); softmac_init(ieee80211_priv(netdev));
zd_chip_init(&mac->chip, netdev, intf); zd_chip_init(&mac->chip, netdev, intf);
...@@ -140,6 +146,8 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type) ...@@ -140,6 +146,8 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
void zd_mac_clear(struct zd_mac *mac) void zd_mac_clear(struct zd_mac *mac)
{ {
flush_workqueue(zd_workqueue); flush_workqueue(zd_workqueue);
skb_queue_purge(&mac->rx_queue);
tasklet_kill(&mac->rx_tasklet);
zd_chip_clear(&mac->chip); zd_chip_clear(&mac->chip);
ZD_ASSERT(!spin_is_locked(&mac->lock)); ZD_ASSERT(!spin_is_locked(&mac->lock));
ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
...@@ -168,6 +176,8 @@ int zd_mac_open(struct net_device *netdev) ...@@ -168,6 +176,8 @@ int zd_mac_open(struct net_device *netdev)
struct zd_chip *chip = &mac->chip; struct zd_chip *chip = &mac->chip;
int r; int r;
tasklet_enable(&mac->rx_tasklet);
r = zd_chip_enable_int(chip); r = zd_chip_enable_int(chip);
if (r < 0) if (r < 0)
goto out; goto out;
...@@ -218,6 +228,8 @@ int zd_mac_stop(struct net_device *netdev) ...@@ -218,6 +228,8 @@ int zd_mac_stop(struct net_device *netdev)
*/ */
zd_chip_disable_rx(chip); zd_chip_disable_rx(chip);
skb_queue_purge(&mac->rx_queue);
tasklet_disable(&mac->rx_tasklet);
housekeeping_disable(mac); housekeeping_disable(mac);
ieee80211softmac_stop(netdev); ieee80211softmac_stop(netdev);
...@@ -470,13 +482,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes) ...@@ -470,13 +482,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes)
if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) { if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) {
/* Set RTS rate to highest available basic rate */ /* Set RTS rate to highest available basic rate */
u8 rate = ieee80211softmac_highest_supported_rate(softmac, u8 hi_rate = ieee80211softmac_highest_supported_rate(softmac,
&bssinfo->supported_rates, 1); &bssinfo->supported_rates, 1);
rate = rate_to_zd_rate(rate); hi_rate = rate_to_zd_rate(hi_rate);
spin_lock_irqsave(&mac->lock, flags); spin_lock_irqsave(&mac->lock, flags);
if (rate != mac->rts_rate) { if (hi_rate != mac->rts_rate) {
mac->rts_rate = rate; mac->rts_rate = hi_rate;
need_set_rts_cts = 1; need_set_rts_cts = 1;
} }
spin_unlock_irqrestore(&mac->lock, flags); spin_unlock_irqrestore(&mac->lock, flags);
...@@ -1072,43 +1084,75 @@ static int fill_rx_stats(struct ieee80211_rx_stats *stats, ...@@ -1072,43 +1084,75 @@ static int fill_rx_stats(struct ieee80211_rx_stats *stats,
return 0; return 0;
} }
int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length) static void zd_mac_rx(struct zd_mac *mac, struct sk_buff *skb)
{ {
int r; int r;
struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
struct ieee80211_rx_stats stats; struct ieee80211_rx_stats stats;
const struct rx_status *status; const struct rx_status *status;
struct sk_buff *skb;
if (length < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN + if (skb->len < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN +
IEEE80211_FCS_LEN + sizeof(struct rx_status)) IEEE80211_FCS_LEN + sizeof(struct rx_status))
return -EINVAL; {
dev_dbg_f(zd_mac_dev(mac), "Packet with length %u to small.\n",
skb->len);
goto free_skb;
}
r = fill_rx_stats(&stats, &status, mac, buffer, length); r = fill_rx_stats(&stats, &status, mac, skb->data, skb->len);
if (r) if (r) {
return r; /* Only packets with rx errors are included here. */
goto free_skb;
}
length -= ZD_PLCP_HEADER_SIZE+IEEE80211_FCS_LEN+ __skb_pull(skb, ZD_PLCP_HEADER_SIZE);
sizeof(struct rx_status); __skb_trim(skb, skb->len -
buffer += ZD_PLCP_HEADER_SIZE; (IEEE80211_FCS_LEN + sizeof(struct rx_status)));
update_qual_rssi(mac, buffer, length, stats.signal, stats.rssi); update_qual_rssi(mac, skb->data, skb->len, stats.signal,
status->signal_strength);
r = filter_rx(ieee, buffer, length, &stats); r = filter_rx(ieee, skb->data, skb->len, &stats);
if (r <= 0) if (r <= 0) {
return r; if (r < 0)
dev_dbg_f(zd_mac_dev(mac), "Error in packet.\n");
goto free_skb;
}
skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length);
if (!skb)
return -ENOMEM;
if (ieee->iw_mode == IW_MODE_MONITOR) if (ieee->iw_mode == IW_MODE_MONITOR)
fill_rt_header(skb_put(skb, sizeof(struct zd_rt_hdr)), mac, fill_rt_header(skb_push(skb, sizeof(struct zd_rt_hdr)), mac,
&stats, status); &stats, status);
memcpy(skb_put(skb, length), buffer, length);
r = ieee80211_rx(ieee, skb, &stats); r = ieee80211_rx(ieee, skb, &stats);
if (!r) if (r)
dev_kfree_skb_any(skb); return;
free_skb:
/* We are always in a soft irq. */
dev_kfree_skb(skb);
}
static void do_rx(unsigned long mac_ptr)
{
struct zd_mac *mac = (struct zd_mac *)mac_ptr;
struct sk_buff *skb;
while ((skb = skb_dequeue(&mac->rx_queue)) != NULL)
zd_mac_rx(mac, skb);
}
int zd_mac_rx_irq(struct zd_mac *mac, const u8 *buffer, unsigned int length)
{
struct sk_buff *skb;
skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length);
if (!skb) {
dev_warn(zd_mac_dev(mac), "Could not allocate skb.\n");
return -ENOMEM;
}
skb_reserve(skb, sizeof(struct zd_rt_hdr));
memcpy(__skb_put(skb, length), buffer, length);
skb_queue_tail(&mac->rx_queue, skb);
tasklet_schedule(&mac->rx_tasklet);
return 0; return 0;
} }
......
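The zd1211rw changes above move RX processing out of the USB completion path: the IRQ-side function only copies the frame into an skb and queues it, and a tasklet drains the queue in softirq context. A minimal, self-contained sketch of that pattern with hypothetical names (struct foo, foo_rx_irq(); the tasklet here simply drops the frames where a real driver would parse and deliver them):

#include <linux/skbuff.h>
#include <linux/interrupt.h>

struct foo {
        struct sk_buff_head rx_queue;
        struct tasklet_struct rx_tasklet;
};

static void foo_rx_tasklet(unsigned long data)
{
        struct foo *f = (struct foo *)data;
        struct sk_buff *skb;

        /* Softirq context: the expensive per-frame work goes here. */
        while ((skb = skb_dequeue(&f->rx_queue)) != NULL)
                dev_kfree_skb(skb);
}

static void foo_rx_init(struct foo *f)
{
        skb_queue_head_init(&f->rx_queue);
        tasklet_init(&f->rx_tasklet, foo_rx_tasklet, (unsigned long)f);
}

/* Called from hard-IRQ/URB-completion context: keep it short. */
static int foo_rx_irq(struct foo *f, const u8 *buffer, unsigned int length)
{
        struct sk_buff *skb = dev_alloc_skb(length);

        if (!skb)
                return -ENOMEM;
        memcpy(skb_put(skb, length), buffer, length);
        skb_queue_tail(&f->rx_queue, skb);
        tasklet_schedule(&f->rx_tasklet);
        return 0;
}

static void foo_rx_exit(struct foo *f)
{
        tasklet_kill(&f->rx_tasklet);
        skb_queue_purge(&f->rx_queue);
}

Purging the queue on both the stop path and final teardown, as the hunks above do, matters: frames queued after the last tasklet run would otherwise leak.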
...@@ -138,6 +138,9 @@ struct zd_mac { ...@@ -138,6 +138,9 @@ struct zd_mac {
struct delayed_work set_rts_cts_work; struct delayed_work set_rts_cts_work;
struct delayed_work set_basic_rates_work; struct delayed_work set_basic_rates_work;
struct tasklet_struct rx_tasklet;
struct sk_buff_head rx_queue;
unsigned int stats_count; unsigned int stats_count;
u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE]; u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
...@@ -193,7 +196,7 @@ int zd_mac_stop(struct net_device *netdev); ...@@ -193,7 +196,7 @@ int zd_mac_stop(struct net_device *netdev);
int zd_mac_set_mac_address(struct net_device *dev, void *p); int zd_mac_set_mac_address(struct net_device *dev, void *p);
void zd_mac_set_multicast_list(struct net_device *netdev); void zd_mac_set_multicast_list(struct net_device *netdev);
int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length); int zd_mac_rx_irq(struct zd_mac *mac, const u8 *buffer, unsigned int length);
int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain); int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain);
u8 zd_mac_get_regdomain(struct zd_mac *zd_mac); u8 zd_mac_get_regdomain(struct zd_mac *zd_mac);
......
...@@ -598,13 +598,13 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, ...@@ -598,13 +598,13 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
n = l+k; n = l+k;
if (n > length) if (n > length)
return; return;
zd_mac_rx(mac, buffer+l, k); zd_mac_rx_irq(mac, buffer+l, k);
if (i >= 2) if (i >= 2)
return; return;
l = (n+3) & ~3; l = (n+3) & ~3;
} }
} else { } else {
zd_mac_rx(mac, buffer, length); zd_mac_rx_irq(mac, buffer, length);
} }
} }
......
...@@ -167,7 +167,7 @@ static void ...@@ -167,7 +167,7 @@ static void
ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context) ieee80211softmac_assoc_notify_scan(struct net_device *dev, int event_type, void *context)
{ {
struct ieee80211softmac_device *mac = ieee80211_priv(dev); struct ieee80211softmac_device *mac = ieee80211_priv(dev);
ieee80211softmac_assoc_work((void*)mac); ieee80211softmac_assoc_work(&mac->associnfo.work.work);
} }
static void static void
...@@ -177,7 +177,7 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void ...@@ -177,7 +177,7 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
switch (event_type) { switch (event_type) {
case IEEE80211SOFTMAC_EVENT_AUTHENTICATED: case IEEE80211SOFTMAC_EVENT_AUTHENTICATED:
ieee80211softmac_assoc_work((void*)mac); ieee80211softmac_assoc_work(&mac->associnfo.work.work);
break; break;
case IEEE80211SOFTMAC_EVENT_AUTH_FAILED: case IEEE80211SOFTMAC_EVENT_AUTH_FAILED:
case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT: case IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT:
......
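The two ieee80211softmac hunks above adapt direct callers to the 2.6.20-style workqueue convention, where a work handler takes a struct work_struct * and recovers its containing object with container_of(), so synchronous calls pass &obj->...work.work instead of a casted private pointer. A small sketch of the convention with illustrative names (struct foo, foo_work):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct foo {
        struct delayed_work work;
        int pending_events;
};

static void foo_work(struct work_struct *w)
{
        /* Recover the containing object from the embedded work_struct. */
        struct foo *f = container_of(w, struct foo, work.work);

        f->pending_events = 0;          /* the deferred action itself */
}

/* Invoking the handler synchronously mirrors the calls in the hunks above. */
static void foo_kick_now(struct foo *f)
{
        foo_work(&f->work.work);
}

static void foo_init(struct foo *f)
{
        INIT_DELAYED_WORK(&f->work, foo_work);
        f->pending_events = 0;
}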
...@@ -463,7 +463,7 @@ ieee80211softmac_wx_get_genie(struct net_device *dev, ...@@ -463,7 +463,7 @@ ieee80211softmac_wx_get_genie(struct net_device *dev,
err = -E2BIG; err = -E2BIG;
} }
spin_unlock_irqrestore(&mac->lock, flags); spin_unlock_irqrestore(&mac->lock, flags);
mutex_lock(&mac->associnfo.mutex); mutex_unlock(&mac->associnfo.mutex);
return err; return err;
} }
......