Commit f34ba4e1 authored by Andrew Morton, committed by Jeff Garzik

[PATCH] revert "forcedeth: fix multi irq issues"

Revert ebf34c9b, tentatively, due to crashes at shutdown - see
http://bugzilla.kernel.org/show_bug.cgi?id=6568.

Cc: Ayaz Abdulla <aabdulla@nvidia.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 2f880b65
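
For context before the hunks: the revert below deletes the using_multi_irqs() helper that 0.54 introduced and restores the open-coded MSI-X test that 0.53 used at every call site. The two predicates are logically equivalent; what actually changes behaviour is the spin_lock_irq() -> spin_lock() restoration in the interrupt handlers and the removal of the centralized nv_request_irq()/nv_free_irq() paths. A minimal standalone sketch that checks the predicate equivalence over all 8-bit flag values (the flag constants and the open_coded() wrapper are illustrative assumptions, not taken from forcedeth.c):

    /*
     * Standalone sketch, not driver code: verify that the open-coded
     * MSI-X test restored by this revert matches the removed helper
     * using_multi_irqs() for every possible msi_flags value.
     * The NV_MSI_X_* values below are assumed for illustration.
     */
    #include <assert.h>
    #include <stdio.h>

    #define NV_MSI_X_VECTORS_MASK 0x000f  /* assumed: low bits hold the vector count */
    #define NV_MSI_X_ENABLED      0x0080  /* assumed flag bit */

    /* body copied from the helper removed in the hunk at -711,72 below */
    static int using_multi_irqs(unsigned int msi_flags)
    {
        if (!(msi_flags & NV_MSI_X_ENABLED) ||
            ((msi_flags & NV_MSI_X_ENABLED) &&
             ((msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
            return 0;
        else
            return 1;
    }

    /* hypothetical wrapper around the predicate the revert re-inlines */
    static int open_coded(unsigned int msi_flags)
    {
        return !(!(msi_flags & NV_MSI_X_ENABLED) ||
                 ((msi_flags & NV_MSI_X_ENABLED) &&
                  ((msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)));
    }

    int main(void)
    {
        unsigned int f;

        for (f = 0; f <= 0xff; f++)
            assert(using_multi_irqs(f) == open_coded(f));
        printf("helper and open-coded test agree for all flag values\n");
        return 0;
    }
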
@@ -106,7 +106,6 @@
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  * 0.52: 20 Jan 2006: Add MSI/MSIX support.
  * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +117,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION    "0.54"
+#define FORCEDETH_VERSION    "0.53"
 #define DRV_NAME    "forcedeth"
 
 #include <linux/module.h>
@@ -711,72 +710,6 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
     }
 }
 
-static int using_multi_irqs(struct net_device *dev)
-{
-    struct fe_priv *np = get_nvpriv(dev);
-
-    if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-        ((np->msi_flags & NV_MSI_X_ENABLED) &&
-         ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
-        return 0;
-    else
-        return 1;
-}
-
-static void nv_enable_irq(struct net_device *dev)
-{
-    struct fe_priv *np = get_nvpriv(dev);
-
-    if (!using_multi_irqs(dev)) {
-        if (np->msi_flags & NV_MSI_X_ENABLED)
-            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-        else
-            enable_irq(dev->irq);
-    } else {
-        enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-        enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-        enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-    }
-}
-
-static void nv_disable_irq(struct net_device *dev)
-{
-    struct fe_priv *np = get_nvpriv(dev);
-
-    if (!using_multi_irqs(dev)) {
-        if (np->msi_flags & NV_MSI_X_ENABLED)
-            disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-        else
-            disable_irq(dev->irq);
-    } else {
-        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-    }
-}
-
-/* In MSIX mode, a write to irqmask behaves as XOR */
-static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-    u8 __iomem *base = get_hwbase(dev);
-
-    writel(mask, base + NvRegIrqMask);
-}
-
-static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-    struct fe_priv *np = get_nvpriv(dev);
-    u8 __iomem *base = get_hwbase(dev);
-
-    if (np->msi_flags & NV_MSI_X_ENABLED) {
-        writel(mask, base + NvRegIrqMask);
-    } else {
-        if (np->msi_flags & NV_MSI_ENABLED)
-            writel(0, base + NvRegMSIIrqMask);
-        writel(0, base + NvRegIrqMask);
-    }
-}
-
 #define MII_READ    (-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -1086,24 +1019,23 @@ static void nv_do_rx_refill(unsigned long data)
     struct net_device *dev = (struct net_device *) data;
     struct fe_priv *np = netdev_priv(dev);
 
-    if (!using_multi_irqs(dev)) {
-        if (np->msi_flags & NV_MSI_X_ENABLED)
-            disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-        else
-            disable_irq(dev->irq);
+
+    if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+        ((np->msi_flags & NV_MSI_X_ENABLED) &&
+         ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+        disable_irq(dev->irq);
     } else {
         disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
     }
     if (nv_alloc_rx(dev)) {
-        spin_lock_irq(&np->lock);
+        spin_lock(&np->lock);
         if (!np->in_shutdown)
             mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-        spin_unlock_irq(&np->lock);
+        spin_unlock(&np->lock);
     }
-    if (!using_multi_irqs(dev)) {
-        if (np->msi_flags & NV_MSI_X_ENABLED)
-            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-        else
-            enable_irq(dev->irq);
+    if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+        ((np->msi_flags & NV_MSI_X_ENABLED) &&
+         ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+        enable_irq(dev->irq);
     } else {
         enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
@@ -1736,7 +1668,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
      * guessed, there is probably a simpler approach.
      * Changing the MTU is a rare event, it shouldn't matter.
      */
-    nv_disable_irq(dev);
+    if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+        ((np->msi_flags & NV_MSI_X_ENABLED) &&
+         ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+        disable_irq(dev->irq);
+    } else {
+        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+        disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+    }
     spin_lock_bh(&dev->xmit_lock);
     spin_lock(&np->lock);
     /* stop engines */
@@ -1769,7 +1709,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
         nv_start_tx(dev);
         spin_unlock(&np->lock);
         spin_unlock_bh(&dev->xmit_lock);
-        nv_enable_irq(dev);
+        if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+            ((np->msi_flags & NV_MSI_X_ENABLED) &&
+             ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+            enable_irq(dev->irq);
+        } else {
+            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+        }
     }
     return 0;
 }
@@ -2160,16 +2108,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
         if (!(events & np->irqmask))
             break;
 
-        spin_lock_irq(&np->lock);
+        spin_lock(&np->lock);
         nv_tx_done(dev);
-        spin_unlock_irq(&np->lock);
+        spin_unlock(&np->lock);
 
         if (events & (NVREG_IRQ_TX_ERR)) {
             dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                         dev->name, events);
         }
         if (i > max_interrupt_work) {
-            spin_lock_irq(&np->lock);
+            spin_lock(&np->lock);
             /* disable interrupts on the nic */
             writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
             pci_push(base);
@@ -2179,7 +2127,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
             mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
         }
         printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-        spin_unlock_irq(&np->lock);
+        spin_unlock(&np->lock);
         break;
     }
@@ -2209,14 +2157,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
         nv_rx_process(dev);
         if (nv_alloc_rx(dev)) {
-            spin_lock_irq(&np->lock);
+            spin_lock(&np->lock);
             if (!np->in_shutdown)
                 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-            spin_unlock_irq(&np->lock);
+            spin_unlock(&np->lock);
         }
 
         if (i > max_interrupt_work) {
-            spin_lock_irq(&np->lock);
+            spin_lock(&np->lock);
             /* disable interrupts on the nic */
             writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
             pci_push(base);
@@ -2226,7 +2174,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
             mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
         }
         printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-        spin_unlock_irq(&np->lock);
+        spin_unlock(&np->lock);
         break;
     }
@@ -2255,14 +2203,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
             break;
 
         if (events & NVREG_IRQ_LINK) {
-            spin_lock_irq(&np->lock);
+            spin_lock(&np->lock);
             nv_link_irq(dev);
-            spin_unlock_irq(&np->lock);
+            spin_unlock(&np->lock);
         }
         if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-            spin_lock_irq(&np->lock);
+            spin_lock(&np->lock);
             nv_linkchange(dev);
-            spin_unlock_irq(&np->lock);
+            spin_unlock(&np->lock);
             np->link_timeout = jiffies + LINK_TIMEOUT;
         }
         if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2270,7 +2218,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
                         dev->name, events);
         }
         if (i > max_interrupt_work) {
-            spin_lock_irq(&np->lock);
+            spin_lock(&np->lock);
             /* disable interrupts on the nic */
             writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
             pci_push(base);
@@ -2280,7 +2228,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
             mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
         }
         printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-        spin_unlock_irq(&np->lock);
+        spin_unlock(&np->lock);
         break;
     }
@@ -2303,10 +2251,9 @@ static void nv_do_nic_poll(unsigned long data)
      * nv_nic_irq because that may decide to do otherwise
      */
 
-    if (!using_multi_irqs(dev)) {
-        if (np->msi_flags & NV_MSI_X_ENABLED)
-            disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-        else
-            disable_irq(dev->irq);
+    if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+        ((np->msi_flags & NV_MSI_X_ENABLED) &&
+         ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+        disable_irq(dev->irq);
         mask = np->irqmask;
     } else {
@@ -2330,11 +2277,10 @@ static void nv_do_nic_poll(unsigned long data)
     writel(mask, base + NvRegIrqMask);
     pci_push(base);
 
-    if (!using_multi_irqs(dev)) {
+    if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+        ((np->msi_flags & NV_MSI_X_ENABLED) &&
+         ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
         nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-        if (np->msi_flags & NV_MSI_X_ENABLED)
-            enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-        else
-            enable_irq(dev->irq);
+        enable_irq(dev->irq);
     } else {
         if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2682,113 +2628,6 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
     writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
-static int nv_request_irq(struct net_device *dev)
-{
-    struct fe_priv *np = get_nvpriv(dev);
-    u8 __iomem *base = get_hwbase(dev);
-    int ret = 1;
-    int i;
-
-    if (np->msi_flags & NV_MSI_X_CAPABLE) {
-        for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-            np->msi_x_entry[i].entry = i;
-        }
-        if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-            np->msi_flags |= NV_MSI_X_ENABLED;
-            if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-                /* Request irq for rx handling */
-                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-                    printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-                    pci_disable_msix(np->pci_dev);
-                    np->msi_flags &= ~NV_MSI_X_ENABLED;
-                    goto out_err;
-                }
-                /* Request irq for tx handling */
-                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-                    printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-                    pci_disable_msix(np->pci_dev);
-                    np->msi_flags &= ~NV_MSI_X_ENABLED;
-                    goto out_free_rx;
-                }
-                /* Request irq for link and timer handling */
-                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-                    printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-                    pci_disable_msix(np->pci_dev);
-                    np->msi_flags &= ~NV_MSI_X_ENABLED;
-                    goto out_free_tx;
-                }
-                /* map interrupts to their respective vector */
-                writel(0, base + NvRegMSIXMap0);
-                writel(0, base + NvRegMSIXMap1);
-                set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-                set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-                set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-            } else {
-                /* Request irq for all interrupts */
-                if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-                    printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-                    pci_disable_msix(np->pci_dev);
-                    np->msi_flags &= ~NV_MSI_X_ENABLED;
-                    goto out_err;
-                }
-
-                /* map interrupts to vector 0 */
-                writel(0, base + NvRegMSIXMap0);
-                writel(0, base + NvRegMSIXMap1);
-            }
-        }
-    }
-    if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-        if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-            np->msi_flags |= NV_MSI_ENABLED;
-            if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-                printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-                pci_disable_msi(np->pci_dev);
-                np->msi_flags &= ~NV_MSI_ENABLED;
-                goto out_err;
-            }
-
-            /* map interrupts to vector 0 */
-            writel(0, base + NvRegMSIMap0);
-            writel(0, base + NvRegMSIMap1);
-            /* enable msi vector 0 */
-            writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-        }
-    }
-    if (ret != 0) {
-        if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-            goto out_err;
-    }
-
-    return 0;
-out_free_tx:
-    free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
-    free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
-    return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
-    struct fe_priv *np = get_nvpriv(dev);
-    int i;
-
-    if (np->msi_flags & NV_MSI_X_ENABLED) {
-        for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-            free_irq(np->msi_x_entry[i].vector, dev);
-        }
-        pci_disable_msix(np->pci_dev);
-        np->msi_flags &= ~NV_MSI_X_ENABLED;
-    } else {
-        free_irq(np->pci_dev->irq, dev);
-        if (np->msi_flags & NV_MSI_ENABLED) {
-            pci_disable_msi(np->pci_dev);
-            np->msi_flags &= ~NV_MSI_ENABLED;
-        }
-    }
-}
-
 static int nv_open(struct net_device *dev)
 {
     struct fe_priv *np = netdev_priv(dev);
@@ -2881,16 +2720,12 @@ static int nv_open(struct net_device *dev)
     udelay(10);
     writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-    nv_disable_hw_interrupts(dev, np->irqmask);
+    writel(0, base + NvRegIrqMask);
     pci_push(base);
     writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
     writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
     pci_push(base);
 
-    if (nv_request_irq(dev)) {
-        goto out_drain;
-    }
-
     if (np->msi_flags & NV_MSI_X_CAPABLE) {
         for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
             np->msi_x_entry[i].entry = i;
@@ -2964,7 +2799,7 @@ static int nv_open(struct net_device *dev)
     }
 
     /* ask for interrupts */
-    nv_enable_hw_interrupts(dev, np->irqmask);
+    writel(np->irqmask, base + NvRegIrqMask);
 
     spin_lock_irq(&np->lock);
     writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -3008,6 +2843,7 @@ static int nv_close(struct net_device *dev)
 {
     struct fe_priv *np = netdev_priv(dev);
     u8 __iomem *base;
+    int i;
 
     spin_lock_irq(&np->lock);
     np->in_shutdown = 1;
@@ -3025,13 +2861,31 @@ static int nv_close(struct net_device *dev)
     /* disable interrupts on the nic or we will lock up */
     base = get_hwbase(dev);
-    nv_disable_hw_interrupts(dev, np->irqmask);
+    if (np->msi_flags & NV_MSI_X_ENABLED) {
+        writel(np->irqmask, base + NvRegIrqMask);
+    } else {
+        if (np->msi_flags & NV_MSI_ENABLED)
+            writel(0, base + NvRegMSIIrqMask);
+        writel(0, base + NvRegIrqMask);
+    }
     pci_push(base);
     dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
     spin_unlock_irq(&np->lock);
 
-    nv_free_irq(dev);
+    if (np->msi_flags & NV_MSI_X_ENABLED) {
+        for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+            free_irq(np->msi_x_entry[i].vector, dev);
+        }
+        pci_disable_msix(np->pci_dev);
+        np->msi_flags &= ~NV_MSI_X_ENABLED;
+    } else {
+        free_irq(np->pci_dev->irq, dev);
+        if (np->msi_flags & NV_MSI_ENABLED) {
+            pci_disable_msi(np->pci_dev);
+            np->msi_flags &= ~NV_MSI_ENABLED;
+        }
+    }
 
     drain_ring(dev);
@@ -3120,18 +2974,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
     if (id->driver_data & DEV_HAS_HIGH_DMA) {
         /* packet format 3: supports 40-bit addressing */
         np->desc_ver = DESC_VER_3;
-        np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
         if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
             printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
                     pci_name(pci_dev));
         } else {
-            dev->features |= NETIF_F_HIGHDMA;
-            printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-        }
-        if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-            printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-                   pci_name(pci_dev));
+            if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+                printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+                        pci_name(pci_dev));
+                goto out_relreg;
+            } else {
+                dev->features |= NETIF_F_HIGHDMA;
+                printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+            }
         }
+        np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
     } else if (id->driver_data & DEV_HAS_LARGEDESC) {
         /* packet format 2: supports jumbo frames */
         np->desc_ver = DESC_VER_2;