Commit 1024362a authored by Tim Hockin

Merge freakshow.cobalt.com:/home/th122948/bk/vanilla/virgin-net-drivers-2.5

into freakshow.cobalt.com:/home/th122948/bk/net-drivers-2.5
parents fe9b9e34 0e70266c
/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
/*
Written/copyright 1999-2001 by Donald Becker.
-Portions copyright (c) 2001 Sun Microsystems (thockin@sun.com)
+Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)

This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.

@@ -119,10 +120,23 @@
initialized
* enable only the WoL and PHY interrupts in wol mode
+
+version 1.0.17:
+* only do cable_magic on 83815 and early 83816 (Tim Hockin)
+* create a function for rx refill (Manfred Spraul)
+* combine drain_ring and init_ring (Manfred Spraul)
+* oom handling (Manfred Spraul)
+* hands_off instead of playing with netif_device_{de,a}ttach
+(Manfred Spraul)
+* be sure to write the MAC back to the chip (Manfred Spraul)
+* lengthen EEPROM timeout, and always warn about timeouts
+(Manfred Spraul)
+* comments update (Manfred)
+* do the right thing on a phy-reset (Manfred and Tim)

TODO:
* big endian support with CFG:BEM instead of cpu_to_le32
* support for an external PHY
-* flow control
+* NAPI
*/
#if !defined(__OPTIMIZE__)

@@ -157,23 +171,11 @@
#include <asm/uaccess.h>

#define DRV_NAME "natsemi"
-#define DRV_VERSION "1.07+LK1.0.16"
-#define DRV_RELDATE "Aug 28, 2002"
+#define DRV_VERSION "1.07+LK1.0.17"
+#define DRV_RELDATE "Sep 27, 2002"

/* Updated to recommendations in pci-skeleton v2.03. */

-/* Automatically extracted configuration info:
-probe-func: natsemi_probe
-config-in: tristate 'National Semiconductor DP8381x series PCI Ethernet support' CONFIG_NATSEMI
-c-help-name: National Semiconductor DP8381x series PCI Ethernet support
-c-help-symbol: CONFIG_NATSEMI
-c-help: This driver is for the National Semiconductor DP8381x series,
-c-help: including the 8381[56] chips.
-c-help: More specific information and updates are available from
-c-help: http://www.scyld.com/network/natsemi.html
-*/
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -235,9 +237,11 @@ static int full_duplex[MAX_UNITS];

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
-KERN_INFO DRV_NAME ".c:v1.07 1/9/2001 Written by Donald Becker <becker@scyld.com>\n"
-KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
-KERN_INFO " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE " Jeff Garzik, Tjeerd Mulder)\n";
+KERN_INFO DRV_NAME " dp8381x driver, version "
+DRV_VERSION ", " DRV_RELDATE "\n"
+KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
+KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
+KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");

@@ -249,11 +253,14 @@ MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM_DESC(max_interrupt_work, "DP8381x maximum events handled per interrupt");
+MODULE_PARM_DESC(max_interrupt_work,
+"DP8381x maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
MODULE_PARM_DESC(debug, "DP8381x default debug bitmask");
-MODULE_PARM_DESC(rx_copybreak, "DP8381x copy breakpoint for copy-only-tiny-frames");
-MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(rx_copybreak,
+"DP8381x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options,
+"DP8381x: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");

/*
@@ -306,20 +313,19 @@ skbuff at an offset of "+2", 16-byte aligning the IP header.
IIId. Synchronization

-The driver runs as two independent, single-threaded flows of control. One
-is the send-packet routine, which enforces single-threaded use by the
-dev->tbusy flag. The other thread is the interrupt handler, which is single
-threaded by the hardware and interrupt handling software.
-
-The send packet thread has partial control over the Tx ring and 'dev->tbusy'
-flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
-queue slot is empty, it clears the tbusy flag when finished otherwise it sets
-the 'lp->tx_full' flag.
-
-The interrupt handler has exclusive control over the Rx ring and records stats
-from the Tx ring. After reaping the stats, it marks the Tx queue entry as
-empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
-clears both the tx_full and tbusy flags.
+Most operations are synchronized on the np->lock irq spinlock, except the
+performance critical codepaths:
+
+The rx process only runs in the interrupt handler. Access from outside
+the interrupt handler is only permitted after disable_irq().
+
+The tx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
+is set, then access is permitted under spin_lock_irq(&np->lock).
+
+Thus configuration functions that want to access everything must call
+disable_irq(dev->irq);
+spin_lock_bh(dev->xmit_lock);
+spin_lock_irq(&np->lock);
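
(Editorial illustration, not part of the patch.) The lock ordering the comment above prescribes for a configuration path that needs exclusive access to both the rx and tx sides looks roughly like this; the function name is hypothetical and the unlock order simply mirrors the lock order:

static void example_reconfigure(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	disable_irq(dev->irq);		/* shuts out intr_handler, i.e. the rx side */
	spin_lock_bh(&dev->xmit_lock);	/* shuts out start_tx */
	spin_lock_irq(&np->lock);	/* everything else synchronizes on np->lock */

	/* ... touch rings, registers, rx filter ... */

	spin_unlock_irq(&np->lock);
	spin_unlock_bh(&dev->xmit_lock);
	enable_irq(dev->irq);
}
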
IV. Notes

@@ -337,7 +343,7 @@ IVc. Errata
None characterised.
*/

enum pcistuff {
PCI_USES_IO = 0x01,

@@ -378,6 +384,7 @@ enum register_offsets {
IntrStatus = 0x10,
IntrMask = 0x14,
IntrEnable = 0x18,
+IntrHoldoff = 0x16, /* DP83816 only */
TxRingPtr = 0x20,
TxConfig = 0x24,
RxRingPtr = 0x30,
@@ -587,8 +594,11 @@ enum PhyCtrl_bits {
PhyAddrMask = 0xf,
};

-#define SRR_REV_C 0x0302
-#define SRR_REV_D 0x0403
+/* values we might find in the silicon revision register */
+#define SRR_DP83815_C 0x0302
+#define SRR_DP83815_D 0x0403
+#define SRR_DP83816_A4 0x0504
+#define SRR_DP83816_A5 0x0505
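
(Editorial note, not part of the patch.) np->srr is filled in once at probe time from the chip's silicon revision register and afterwards only compared against the constants above to gate chip-specific quirks. Roughly; the probe-time read shown first is an assumption based on the rest of the driver, not text from this hunk:

/* at probe time -- assumed */
np->srr = readl(ioaddr + SiliconRev);

/* later, e.g. in do_cable_magic()/undo_cable_magic() and the WOL helpers */
if (np->srr >= SRR_DP83816_A5)
	return;				/* DP83816 A5 and newer: skip the cable magic */
if (np->srr >= SRR_DP83815_D)
	*supported |= WAKE_MAGICSECURE;	/* SOPASS exists from DP83815 rev D onwards */
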
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian

@@ -619,30 +629,36 @@ enum desc_status_bits {
};

struct netdev_private {
-/* Descriptor rings first for alignment. */
+/* Descriptor rings first for alignment */
dma_addr_t ring_dma;
-struct netdev_desc* rx_ring;
-struct netdev_desc* tx_ring;
+struct netdev_desc *rx_ring;
+struct netdev_desc *tx_ring;
-/* The addresses of receive-in-place skbuffs. */
+/* The addresses of receive-in-place skbuffs */
-struct sk_buff* rx_skbuff[RX_RING_SIZE];
+struct sk_buff *rx_skbuff[RX_RING_SIZE];
dma_addr_t rx_dma[RX_RING_SIZE];
-/* The saved address of a sent-in-place packet/buffer, for later free(). */
+/* address of a sent-in-place packet/buffer, for later free() */
-struct sk_buff* tx_skbuff[TX_RING_SIZE];
+struct sk_buff *tx_skbuff[TX_RING_SIZE];
dma_addr_t tx_dma[TX_RING_SIZE];
struct net_device_stats stats;
-struct timer_list timer; /* Media monitoring timer. */
-/* Frequently used values: keep some adjacent for cache effect. */
+/* Media monitoring timer */
+struct timer_list timer;
+/* Frequently used values: keep some adjacent for cache effect */
struct pci_dev *pci_dev;
struct netdev_desc *rx_head_desc;
-unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+/* Producer/consumer ring indices */
+unsigned int cur_rx, dirty_rx;
unsigned int cur_tx, dirty_tx;
-unsigned int rx_buf_sz; /* Based on MTU+slack. */
-/* These values are keep track of the transceiver/media in use. */
+/* Based on MTU+slack. */
+unsigned int rx_buf_sz;
+int oom;
+/* Do not touch the nic registers */
+int hands_off;
+/* These values are keep track of the transceiver/media in use */
unsigned int full_duplex;
-/* Rx filter. */
+/* Rx filter */
u32 cur_rx_mode;
u32 rx_filter[16];
-/* FIFO and PCI burst thresholds. */
+/* FIFO and PCI burst thresholds */
u32 tx_config, rx_config;
/* original contents of ClkRun register */
u32 SavedClkRun;
@@ -650,8 +666,8 @@ struct netdev_private {
u32 srr;
/* expected DSPCFG value */
u16 dspcfg;
-/* MII transceiver section. */
-u16 advertising; /* NWay media advertisement */
+/* MII transceiver section */
+u16 advertising;
unsigned int iosize;
spinlock_t lock;
u32 msg_enable;

@@ -668,11 +684,15 @@ static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
+static void dump_ring(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int alloc_ring(struct net_device *dev);
+static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
+static void drain_tx(struct net_device *dev);
static void drain_ring(struct net_device *dev);
static void free_ring(struct net_device *dev);
+static void reinit_ring(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);

@@ -695,7 +715,7 @@ static int netdev_close(struct net_device *dev);
static int netdev_get_regs(struct net_device *dev, u8 *buf);
static int netdev_get_eeprom(struct net_device *dev, u8 *buf);

static int __devinit natsemi_probe1 (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -777,6 +797,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
np->iosize = iosize;
spin_lock_init(&np->lock);
np->msg_enable = debug;
+np->hands_off = 0;

/* Reset the chip to erase previous misconfiguration. */
natsemi_reload_eeprom(dev);

@@ -836,7 +857,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
"10%s %s duplex.\n",
dev->name,
-chip_config & CfgAnegEnable ? "enabled, advertise" : "disabled, force",
+chip_config & CfgAnegEnable ?
+"enabled, advertise" : "disabled, force",
chip_config & CfgAneg100 ? "0" : "",
chip_config & CfgAnegFull ? "full" : "half");
}

@@ -856,7 +878,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
return 0;
}

/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */

@@ -911,8 +933,8 @@ static int eeprom_read(long addr, int location)
}

/* MII transceiver control section.
-The 83815 series has an internal transceiver, and we present the
-management registers as if they were MII connected. */
+* The 83815 series has an internal transceiver, and we present the
+* management registers as if they were MII connected. */

static int mdio_read(struct net_device *dev, int phy_id, int reg)
{
@@ -982,8 +1004,8 @@ static void natsemi_reset(struct net_device *dev)
break;
udelay(5);
}
-if (i==NATSEMI_HW_TIMEOUT && netif_msg_hw(np)) {
-printk(KERN_INFO "%s: reset did not complete in %d usec.\n",
+if (i==NATSEMI_HW_TIMEOUT) {
+printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
dev->name, i*5);
} else if (netif_msg_hw(np)) {
printk(KERN_DEBUG "%s: reset completed in %d usec.\n",

@@ -1009,7 +1031,6 @@ static void natsemi_reset(struct net_device *dev)
}
/* restore RFCR */
writel(rfcr, dev->base_addr + RxFilterAddr);
}

static void natsemi_reload_eeprom(struct net_device *dev)

@@ -1019,16 +1040,16 @@ static void natsemi_reload_eeprom(struct net_device *dev)
writel(EepromReload, dev->base_addr + PCIBusCfg);
for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+udelay(50);
if (!(readl(dev->base_addr + PCIBusCfg) & EepromReload))
break;
-udelay(5);
}
-if (i==NATSEMI_HW_TIMEOUT && netif_msg_hw(np)) {
-printk(KERN_INFO "%s: EEPROM did not reload in %d usec.\n",
-dev->name, i*5);
+if (i==NATSEMI_HW_TIMEOUT) {
+printk(KERN_WARNING "%s: EEPROM did not reload in %d usec.\n",
+dev->name, i*50);
} else if (netif_msg_hw(np)) {
printk(KERN_DEBUG "%s: EEPROM reloaded in %d usec.\n",
-dev->name, i*5);
+dev->name, i*50);
}
}

@@ -1044,8 +1065,8 @@ static void natsemi_stop_rxtx(struct net_device *dev)
break;
udelay(5);
}
-if (i==NATSEMI_HW_TIMEOUT && netif_msg_hw(np)) {
-printk(KERN_INFO "%s: Tx/Rx process did not stop in %d usec.\n",
+if (i==NATSEMI_HW_TIMEOUT) {
+printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
dev->name, i*5);
} else if (netif_msg_hw(np)) {
printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
@@ -1076,6 +1097,14 @@ static int netdev_open(struct net_device *dev)
init_ring(dev);
spin_lock_irq(&np->lock);
init_registers(dev);
+
+/* now set the MAC address according to dev->dev_addr */
+for (i = 0; i < 3; i++) {
+u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
+writel(i*2, ioaddr + RxFilterAddr);
+writew(mac, ioaddr + RxFilterData);
+}
+writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
spin_unlock_irq(&np->lock);

netif_start_queue(dev);

@@ -1096,6 +1125,11 @@ static int netdev_open(struct net_device *dev)
static void do_cable_magic(struct net_device *dev)
{
+struct netdev_private *np = dev->priv;
+
+if (np->srr >= SRR_DP83816_A5)
+return;
+
/*
* 100 MBit links with short cables can trip an issue with the chip.
* The problem manifests as lots of CRC errors and/or flickering

@@ -1134,6 +1168,9 @@ static void undo_cable_magic(struct net_device *dev)
u16 data;
struct netdev_private *np = dev->priv;

+if (np->srr >= SRR_DP83816_A5)
+return;
+
writew(1, dev->base_addr + PGSEL);
/* make sure the lock bit is clear */
data = readw(dev->base_addr + DSPCFG);
@@ -1281,13 +1318,16 @@ static void init_registers(struct net_device *dev)
}

/*
+* netdev_timer:
* Purpose:
-* check for sudden death of the NIC:
-*
+* 1) check for link changes. Usually they are handled by the MII interrupt
+* but it doesn't hurt to check twice.
+* 2) check for sudden death of the NIC:
* It seems that a reference set for this chip went out with incorrect info,
-* and there exist boards that aren't quite right. An unexpected voltage drop
-* can cause the PHY to get itself in a weird state (basically reset..).
+* and there exist boards that aren't quite right. An unexpected voltage
+* drop can cause the PHY to get itself in a weird state (basically reset).
* NOTE: this only seems to affect revC chips.
+* 3) check of death of the RX path due to OOM
*/
static void netdev_timer(unsigned long data)
{

@@ -1319,6 +1359,9 @@ static void netdev_timer(unsigned long data)
"re-initializing\n", dev->name);
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
+natsemi_stop_rxtx(dev);
+dump_ring(dev);
+reinit_ring(dev);
init_registers(dev);
spin_unlock_irq(&np->lock);
enable_irq(dev->irq);

@@ -1332,6 +1375,17 @@ static void netdev_timer(unsigned long data)
check_link(dev);
spin_unlock_irq(&np->lock);
}
+if (np->oom) {
+disable_irq(dev->irq);
+np->oom = 0;
+refill_rx(dev);
+enable_irq(dev->irq);
+if (!np->oom) {
+writel(RxOn, dev->base_addr + ChipCmd);
+} else {
+next_tick = 1;
+}
+}
mod_timer(&np->timer, jiffies + next_tick);
}
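
(Editorial sketch, not part of the patch.) The "sudden death" check described as purpose (2) in the comment above netdev_timer() is not visible in the hunks shown; in rough outline it compares the live DSPCFG value against the copy cached in np->dspcfg and, on mismatch, performs the stop/dump/reinit sequence added above. The snippet below is an assumption-based illustration, not quoted driver text:

writew(1, ioaddr + PGSEL);	/* select the register page holding DSPCFG */
dspcfg = readw(ioaddr + DSPCFG);
writew(0, ioaddr + PGSEL);
if (dspcfg != np->dspcfg) {
	/* the PHY reset itself behind our back: rebuild everything */
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	natsemi_stop_rxtx(dev);
	dump_ring(dev);
	reinit_ring(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);
}
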
@@ -1365,7 +1419,7 @@ static void tx_timeout(struct net_device *dev)
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
-if (netif_device_present(dev)) {
+if (!np->hands_off) {
if (netif_msg_tx_err(np))
printk(KERN_WARNING
"%s: Transmit timed out, status %#08x,"

@@ -1374,12 +1428,11 @@ static void tx_timeout(struct net_device *dev)
dump_ring(dev);
natsemi_reset(dev);
-drain_ring(dev);
-init_ring(dev);
+reinit_ring(dev);
init_registers(dev);
} else {
printk(KERN_WARNING
-"%s: tx_timeout while in suspended state?\n",
+"%s: tx_timeout while in hands_off state?\n",
dev->name);
}
spin_unlock_irq(&np->lock);

@@ -1402,16 +1455,54 @@ static int alloc_ring(struct net_device *dev)
return 0;
}
+static void refill_rx(struct net_device *dev)
+{
+struct netdev_private *np = dev->priv;
+
+/* Refill the Rx ring buffers. */
+for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+struct sk_buff *skb;
+int entry = np->dirty_rx % RX_RING_SIZE;
+if (np->rx_skbuff[entry] == NULL) {
+skb = dev_alloc_skb(np->rx_buf_sz);
+np->rx_skbuff[entry] = skb;
+if (skb == NULL)
+break; /* Better luck next round. */
+skb->dev = dev; /* Mark as being used by this device. */
+np->rx_dma[entry] = pci_map_single(np->pci_dev,
+skb->data, skb->len, PCI_DMA_FROMDEVICE);
+np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
+}
+np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
+}
+if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
+if (netif_msg_rx_err(np))
+printk(KERN_WARNING "%s: going OOM.\n", dev->name);
+np->oom = 1;
+}
+}
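
(Editorial note, not part of the patch.) The loop above relies on cur_rx only ever running ahead of dirty_rx: the difference is the number of descriptors the driver has consumed but not yet handed back to the chip. That is also why init_ring() below starts with cur_rx = RX_RING_SIZE and dirty_rx = 0, so the very first refill_rx() call populates the whole ring. A minimal sketch of the invariant:

unsigned int owed = np->cur_rx - np->dirty_rx;	/* descriptors awaiting refill */
/* owed == 0            : every descriptor is owned by the chip, nothing to do   */
/* owed == RX_RING_SIZE : dev_alloc_skb() failed for the entire ring, so the
 *                        function sets np->oom and netdev_timer() retries the
 *                        refill later with the irq disabled                     */
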
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
int i;

-np->cur_rx = np->cur_tx = 0;
-np->dirty_rx = np->dirty_tx = 0;
+/* 1) TX ring */
+np->dirty_tx = np->cur_tx = 0;
+for (i = 0; i < TX_RING_SIZE; i++) {
+np->tx_skbuff[i] = NULL;
+np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
++sizeof(struct netdev_desc)
+*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
+np->tx_ring[i].cmd_status = 0;
+}
+
+/* 2) RX ring */
+np->dirty_rx = 0;
+np->cur_rx = RX_RING_SIZE;
np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+np->oom = 0;
np->rx_head_desc = &np->rx_ring[0];

/* Please be carefull before changing this loop - at least gcc-2.95.1
@@ -1425,29 +1516,25 @@ static void init_ring(struct net_device *dev)
np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
np->rx_skbuff[i] = NULL;
}
+refill_rx(dev);
+dump_ring(dev);
+}

-/* Fill in the Rx buffers. Handle allocation failure gracefully. */
-for (i = 0; i < RX_RING_SIZE; i++) {
-struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
-np->rx_skbuff[i] = skb;
-if (skb == NULL)
-break;
-skb->dev = dev; /* Mark as being used by this device. */
-np->rx_dma[i] = pci_map_single(np->pci_dev,
-skb->data, skb->len, PCI_DMA_FROMDEVICE);
-np->rx_ring[i].addr = cpu_to_le32(np->rx_dma[i]);
-np->rx_ring[i].cmd_status = cpu_to_le32(np->rx_buf_sz);
-}
-np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+static void drain_tx(struct net_device *dev)
+{
+struct netdev_private *np = dev->priv;
+int i;

for (i = 0; i < TX_RING_SIZE; i++) {
+if (np->tx_skbuff[i]) {
+pci_unmap_single(np->pci_dev,
+np->rx_dma[i], np->rx_skbuff[i]->len,
+PCI_DMA_TODEVICE);
+dev_kfree_skb(np->tx_skbuff[i]);
+np->stats.tx_dropped++;
+}
np->tx_skbuff[i] = NULL;
-np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
-+sizeof(struct netdev_desc)
-*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
-np->tx_ring[i].cmd_status = 0;
}
-dump_ring(dev);
}
static void drain_ring(struct net_device *dev)

@@ -1461,23 +1548,13 @@ static void drain_ring(struct net_device *dev)
np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
if (np->rx_skbuff[i]) {
pci_unmap_single(np->pci_dev,
-np->rx_dma[i],
-np->rx_skbuff[i]->len,
+np->rx_dma[i], np->rx_skbuff[i]->len,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
np->rx_skbuff[i] = NULL;
}
-for (i = 0; i < TX_RING_SIZE; i++) {
-if (np->tx_skbuff[i]) {
-pci_unmap_single(np->pci_dev,
-np->rx_dma[i],
-np->rx_skbuff[i]->len,
-PCI_DMA_TODEVICE);
-dev_kfree_skb(np->tx_skbuff[i]);
-}
-np->tx_skbuff[i] = NULL;
-}
+drain_tx(dev);
}

static void free_ring(struct net_device *dev)

@@ -1488,6 +1565,28 @@ static void free_ring(struct net_device *dev)
np->rx_ring, np->ring_dma);
}
+static void reinit_ring(struct net_device *dev)
+{
+struct netdev_private *np = dev->priv;
+int i;
+
+/* drain TX ring */
+drain_tx(dev);
+np->dirty_tx = np->cur_tx = 0;
+for (i=0;i<TX_RING_SIZE;i++)
+np->tx_ring[i].cmd_status = 0;
+
+/* RX Ring */
+np->dirty_rx = 0;
+np->cur_rx = RX_RING_SIZE;
+np->rx_head_desc = &np->rx_ring[0];
+/* Initialize all Rx descriptors. */
+for (i = 0; i < RX_RING_SIZE; i++)
+np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+
+refill_rx(dev);
+}
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = dev->priv;

@@ -1507,7 +1606,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&np->lock);

-if (netif_device_present(dev)) {
+if (!np->hands_off) {
np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
/* StrongARM: Explicitly cache flush np->tx_ring and
* skb->data,skb->len. */

@@ -1552,7 +1651,8 @@ static void netdev_tx_done(struct net_device *dev)
np->stats.tx_packets++;
np->stats.tx_bytes += np->tx_skbuff[entry]->len;
} else { /* Various Tx errors */
-int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
+int tx_status =
+le32_to_cpu(np->tx_ring[entry].cmd_status);
if (tx_status & (DescTxAbort|DescTxExcColl))
np->stats.tx_aborted_errors++;
if (tx_status & DescTxFIFO)

@@ -1586,15 +1686,17 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
long ioaddr = dev->base_addr;
int boguscnt = max_interrupt_work;

-if (!netif_device_present(dev))
+if (np->hands_off)
return;
do {
/* Reading automatically acknowledges all int sources. */
u32 intr_status = readl(ioaddr + IntrStatus);

if (netif_msg_intr(np))
-printk(KERN_DEBUG "%s: Interrupt, status %#08x.\n",
-dev->name, intr_status);
+printk(KERN_DEBUG
+"%s: Interrupt, status %#08x, mask %#08x.\n",
+dev->name, intr_status,
+readl(ioaddr + IntrMask));

if (intr_status == 0)
break;

@@ -1620,7 +1722,8 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
if (netif_msg_intr(np))
printk(KERN_WARNING
"%s: Too much work at interrupt, "
-"status=%#08x.\n", dev->name, intr_status);
+"status=%#08x.\n",
+dev->name, intr_status);
break;
}
} while (1);
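
(Editorial sketch, not part of the patch.) Only fragments of intr_handler() appear in the hunks above, so the following is a rough outline of its overall shape under the new hands_off scheme. The dispatch section in the middle is an assumption standing in for code not shown here; rx_interrupt_bits and tx_interrupt_bits are placeholders for the driver's real status masks, not actual identifiers:

static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int boguscnt = max_interrupt_work;

	if (np->hands_off)	/* close/suspend currently owns the hardware */
		return;
	do {
		/* reading IntrStatus acknowledges all pending sources at once */
		u32 intr_status = readl(ioaddr + IntrStatus);

		if (intr_status == 0)
			break;	/* nothing left, or a shared-irq false alarm */

		/* assumed dispatch: rx work, then tx completions (the real
		 * handler takes np->lock around the tx completion path) */
		if (intr_status & rx_interrupt_bits)
			netdev_rx(dev);
		if (intr_status & tx_interrupt_bits)
			netdev_tx_done(dev);

		if (--boguscnt < 0) {
			/* bound the work done in a single invocation */
			break;
		}
	} while (1);
}
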
@@ -1677,16 +1780,19 @@ static void netdev_rx(struct net_device *dev)
if (pkt_len < rx_copybreak
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb->dev = dev;
-skb_reserve(skb, 2); /* 16 byte align the IP header */
-pci_dma_sync_single(np->pci_dev, np->rx_dma[entry],
+/* 16 byte align the IP header */
+skb_reserve(skb, 2);
+pci_dma_sync_single(np->pci_dev,
+np->rx_dma[entry],
np->rx_skbuff[entry]->len,
PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM
-eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+eth_copy_and_sum(skb,
+np->rx_skbuff[entry]->tail, pkt_len, 0);
skb_put(skb, pkt_len);
#else
-memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
-pkt_len);
+memcpy(skb_put(skb, pkt_len),
+np->rx_skbuff[entry]->tail, pkt_len);
#endif
} else {
pci_unmap_single(np->pci_dev, np->rx_dma[entry],

@@ -1696,7 +1802,6 @@ static void netdev_rx(struct net_device *dev)
np->rx_skbuff[entry] = NULL;
}
skb->protocol = eth_type_trans(skb, dev);
-/* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
netif_rx(skb);
dev->last_rx = jiffies;
np->stats.rx_packets++;

@@ -1706,26 +1811,12 @@ static void netdev_rx(struct net_device *dev)
np->rx_head_desc = &np->rx_ring[entry];
desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
}
+refill_rx(dev);

-/* Refill the Rx ring buffers. */
-for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
-struct sk_buff *skb;
-entry = np->dirty_rx % RX_RING_SIZE;
-if (np->rx_skbuff[entry] == NULL) {
-skb = dev_alloc_skb(np->rx_buf_sz);
-np->rx_skbuff[entry] = skb;
-if (skb == NULL)
-break; /* Better luck next round. */
-skb->dev = dev; /* Mark as being used by this device. */
-np->rx_dma[entry] = pci_map_single(np->pci_dev,
-skb->data, skb->len, PCI_DMA_FROMDEVICE);
-np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
-}
-np->rx_ring[entry].cmd_status =
-cpu_to_le32(np->rx_buf_sz);
-}

/* Restart Rx engine if stopped. */
+if (np->oom)
+mod_timer(&np->timer, jiffies + 1);
+else
writel(RxOn, dev->base_addr + ChipCmd);
}
@@ -1800,7 +1891,7 @@ static struct net_device_stats *get_stats(struct net_device *dev)
/* The chip only need report frame silently dropped. */
spin_lock_irq(&np->lock);
-if (netif_running(dev) && netif_device_present(dev))
+if (netif_running(dev) && !np->hands_off)
__get_stats(dev);
spin_unlock_irq(&np->lock);

@@ -1869,7 +1960,8 @@ static void __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
-set_bit_le(dp83815_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
+set_bit_le(
+dp83815_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
mc_filter);
}
rx_mode = RxFilterEnable | AcceptBroadcast

@@ -1888,7 +1980,7 @@ static void set_rx_mode(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
spin_lock_irq(&np->lock);
-if (netif_device_present(dev))
+if (!np->hands_off)
__set_rx_mode(dev);
spin_unlock_irq(&np->lock);
}

@@ -2082,7 +2174,7 @@ static int netdev_set_wol(struct net_device *dev, u32 newval)
data |= WakeArp;
if (newval & WAKE_MAGIC)
data |= WakeMagic;
-if (np->srr >= SRR_REV_D) {
+if (np->srr >= SRR_DP83815_D) {
if (newval & WAKE_MAGICSECURE) {
data |= WakeMagicSecure;
}

@@ -2101,7 +2193,7 @@ static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
| WAKE_ARP | WAKE_MAGIC);
-if (np->srr >= SRR_REV_D) {
+if (np->srr >= SRR_DP83815_D) {
/* SOPASS works on revD and higher */
*supported |= WAKE_MAGICSECURE;
}

@@ -2134,7 +2226,7 @@ static int netdev_set_sopass(struct net_device *dev, u8 *newval)
u16 *sval = (u16 *)newval;
u32 addr;

-if (np->srr < SRR_REV_D) {
+if (np->srr < SRR_DP83815_D) {
return 0;
}

@@ -2165,7 +2257,7 @@ static int netdev_get_sopass(struct net_device *dev, u8 *data)
u16 *sval = (u16 *)data;
u32 addr;

-if (np->srr < SRR_REV_D) {
+if (np->srr < SRR_DP83815_D) {
sval[0] = sval[1] = sval[2] = 0;
return 0;
}
@@ -2415,9 +2507,6 @@ static int netdev_close(struct net_device *dev)
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;

-netif_stop_queue(dev);
-netif_carrier_off(dev);
if (netif_msg_ifdown(np))
printk(KERN_DEBUG
"%s: Shutting down ethercard, status was %#04x.\n",

@@ -2428,13 +2517,31 @@ static int netdev_close(struct net_device *dev)
dev->name, np->cur_tx, np->dirty_tx,
np->cur_rx, np->dirty_rx);

-del_timer_sync(&np->timer);
+/*
+* FIXME: what if someone tries to close a device
+* that is suspended?
+* Should we reenable the nic to switch to
+* the final WOL settings?
+*/
+del_timer_sync(&np->timer);
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
-/* Disable and clear interrupts */
+/* Disable interrupts, and flush posted writes */
writel(0, ioaddr + IntrEnable);
+readl(ioaddr + IntrEnable);
+np->hands_off = 1;
+spin_unlock_irq(&np->lock);
+enable_irq(dev->irq);
+
+free_irq(dev->irq, dev);
+
+/* Interrupt disabled, interrupt handler released,
+* queue stopped, timer deleted, rtnl_lock held
+* All async codepaths that access the driver are disabled.
+*/
+spin_lock_irq(&np->lock);
+np->hands_off = 0;
readl(ioaddr + IntrMask);
readw(ioaddr + MIntrStatus);
@@ -2447,19 +2554,9 @@ static int netdev_close(struct net_device *dev)
__get_stats(dev);
spin_unlock_irq(&np->lock);

-/* race: shared irq and as most nics the DP83815
-* reports _all_ interrupt conditions in IntrStatus, even
-* disabled ones.
-* packet received after disable_irq, but before stop_rxtx
-* --> race. intr_handler would restart the rx process.
-* netif_device_{de,a}tach around {enable,free}_irq.
-*/
-netif_device_detach(dev);
-enable_irq(dev->irq);
-free_irq(dev->irq, dev);
-netif_device_attach(dev);
/* clear the carrier last - an interrupt could reenable it otherwise */
netif_carrier_off(dev);
+netif_stop_queue(dev);

dump_ring(dev);
drain_ring(dev);

@@ -2480,7 +2577,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
static void __devexit natsemi_remove1 (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);

@@ -2495,23 +2592,26 @@ static void __devexit natsemi_remove1 (struct pci_dev *pdev)
#ifdef CONFIG_PM

/*
+* The ns83815 chip doesn't have explicit RxStop bits.
+* Kicking the Rx or Tx process for a new packet reenables the Rx process
+* of the nic, thus this function must be very careful:
+*
* suspend/resume synchronization:
* entry points:
* netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
* start_tx, tx_timeout
-* Reading from some registers can restart the nic!
-* No function accesses the hardware without checking netif_device_present().
+*
+* No function accesses the hardware without checking np->hands_off.
* the check occurs under spin_lock_irq(&np->lock);
* exceptions:
-* * netdev_ioctl, netdev_open.
-* net/core checks netif_device_present() before calling them.
+* * netdev_ioctl: noncritical access.
+* * netdev_open: cannot happen due to the device_detach
* * netdev_close: doesn't hurt.
* * netdev_timer: timer stopped by natsemi_suspend.
* * intr_handler: doesn't acquire the spinlock. suspend calls
* disable_irq() to enforce synchronization.
*
-* netif_device_detach must occur under spin_unlock_irq(), interrupts from a
-* detached device would cause an irq storm.
+* Interrupts must be disabled, otherwise hands_off can cause irq storms.
*/
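
(Editorial sketch, not part of the patch.) The rule above, made concrete: every hardware-touching entry point now brackets its work with a hands_off test under np->lock. set_rx_mode() and get_stats() in the hunks above follow exactly this shape, while intr_handler() performs the same test without taking the spinlock. __do_the_real_work() below is a hypothetical stand-in for __set_rx_mode(), __get_stats() and friends:

spin_lock_irq(&np->lock);
if (!np->hands_off)		/* suspend/close is not holding the hardware */
	__do_the_real_work(dev);
spin_unlock_irq(&np->lock);
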
static int natsemi_suspend (struct pci_dev *pdev, u32 state)

@@ -2528,9 +2628,9 @@ static int natsemi_suspend (struct pci_dev *pdev, u32 state)
spin_lock_irq(&np->lock);

writel(0, ioaddr + IntrEnable);
+np->hands_off = 1;
natsemi_stop_rxtx(dev);
netif_stop_queue(dev);
-netif_device_detach(dev);

spin_unlock_irq(&np->lock);
enable_irq(dev->irq);

@@ -2554,9 +2654,8 @@ static int natsemi_suspend (struct pci_dev *pdev, u32 state)
writel(np->SavedClkRun, ioaddr + ClkRun);
}
}
-} else {
-netif_device_detach(dev);
}
+netif_device_detach(dev);
rtnl_unlock();
return 0;
}
@@ -2571,20 +2670,23 @@ static int natsemi_resume (struct pci_dev *pdev)
if (netif_device_present(dev))
goto out;
if (netif_running(dev)) {
+BUG_ON(!np->hands_off);
pci_enable_device(pdev);
/* pci_power_on(pdev); */

natsemi_reset(dev);
init_ring(dev);
+disable_irq(dev->irq);
spin_lock_irq(&np->lock);
+np->hands_off = 0;
init_registers(dev);
netif_device_attach(dev);
spin_unlock_irq(&np->lock);
+enable_irq(dev->irq);

mod_timer(&np->timer, jiffies + 1*HZ);
-} else {
-netif_device_attach(dev);
}
+netif_device_attach(dev);
out:
rtnl_unlock();
return 0;
...