Commit 4debb9ea authored by David S. Miller

[NET]: Close NETIF_F_LLTX race conditions.

When drivers other than loopback were using the LLTX
feature, a race window was present.  While sending
queued packets, the packet scheduler layer drops the
queue lock and then calls directly into the driver's
xmit handler.  The driver then grabs its private TX
lock and goes to work.

However, as soon as we've dropped the queue lock, another
thread doing TX processing for that card can execute
a netif_stop_queue() due to the TX queue filling up.
This race window causes problems because a properly coded
driver should never end up in its ->hard_start_xmit()
handler if the queue on the device has been stopped, and
we even BUG() trap for this condition in all of the device
drivers.  That is how this race window was discovered
by Roland and the Infiniband folks.
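
To make the window concrete, here is a minimal sketch of the
pre-patch LLTX driver pattern (the drv_* names and drv_private
structure are illustrative, not taken from any one driver; compare
the tg3/e1000/sungem/ipoib code removed below):

    /* Sketch only: hypothetical driver, pre-patch LLTX style.  The
     * qdisc has already dropped dev->queue_lock when this runs, so a
     * TX-completion interrupt on another CPU can netif_stop_queue()
     * the device before we get here.
     */
    static int drv_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct drv_private *dp = netdev_priv(dev);
            unsigned long flags;

            local_irq_save(flags);
            if (!spin_trylock(&dp->tx_lock)) {
                    local_irq_restore(flags);
                    return NETDEV_TX_LOCKED;  /* core requeues the skb */
            }

            /* Many drivers BUG() here instead of checking, which is
             * exactly where the race bites.
             */
            if (unlikely(netif_queue_stopped(dev))) {
                    spin_unlock_irqrestore(&dp->tx_lock, flags);
                    return NETDEV_TX_BUSY;
            }

            /* ... place skb on the hardware TX ring ... */

            spin_unlock_irqrestore(&dp->tx_lock, flags);
            return NETDEV_TX_OK;
    }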

Various suggestions were made to close this race, one
of which involved holding onto the queue lock all the
way into the ->hard_start_xmit() routine and having
the driver drop that lock only after taking its private
TX lock.  This solution was deemed grotty because it is
not wise to put queueing discipline internals into the
device drivers.

The solution taken here, which is based upon ideas from
Stephen Hemminger, is twofold:

1) Leave LLTX around for purely software devices that
   need no locking at all for TX processing.  The existing
   example is loopback, although all tunnel devices could
   be converted this way too (see the sketch after this
   list).

2) Stop trying to use LLTX for the other devices.  Instead
   achieve the same goal using a different mechanism.
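
To make #1 concrete, a software device of the loopback kind needs
no TX serialization at all; a hedged sketch (the sw_loop_xmit name
is illustrative, this is not the actual loopback driver):

    /* Sketch only: an LLTX-style software device.  Nothing here
     * touches shared hardware state, so the core may call this
     * without taking dev->xmit_lock.
     */
    static int sw_loop_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            skb_orphan(skb);
            /* ... update stats, set skb->protocol, etc. ... */
            netif_rx(skb);          /* feed the packet straight back in */
            return NETDEV_TX_OK;
    }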

For #2, the thing we were trying to achieve with LLTX
was to eliminate excess locking.  We accomplish that
now by letting the device driver use dev->xmit_lock directly
instead of a separate priv->tx_lock of some sort.

In order to allow that, we had to turn dev->xmit_lock into
a hardware IRQ disabling lock instead of a BH disabling one.
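
Roughly, and ignoring driver-specific details, the resulting driver
pattern looks like the sketch below (the drv_* names, ring_space()
and wake_threshold() are illustrative placeholders, not real kernel
symbols):

    /* Sketch only.  ->hard_start_xmit() is now entered by the core
     * with dev->xmit_lock held and hard IRQs disabled, so the driver
     * adds no locking of its own on the xmit path.
     */
    static int drv_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct drv_private *dp = netdev_priv(dev);

            if (unlikely(ring_space(dp) < skb_shinfo(skb)->nr_frags + 1)) {
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* flow control broken if we get here */
            }

            /* ... place skb on the hardware TX ring ... */
            return NETDEV_TX_OK;
    }

    /* TX completion (hard IRQ or NAPI poll context) serializes against
     * the xmit path with the same lock, which is why xmit_lock had to
     * become hardware-IRQ safe.
     */
    static void drv_tx_complete(struct net_device *dev)
    {
            struct drv_private *dp = netdev_priv(dev);
            unsigned long flags;

            spin_lock_irqsave(&dev->xmit_lock, flags);
            /* ... reclaim completed TX descriptors ... */
            if (netif_queue_stopped(dev) && ring_space(dp) >= wake_threshold(dp))
                    netif_wake_queue(dev);
            spin_unlock_irqrestore(&dev->xmit_lock, flags);
    }
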
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a3c24c0b
...@@ -45,10 +45,9 @@ dev->hard_start_xmit: ...@@ -45,10 +45,9 @@ dev->hard_start_xmit:
Synchronization: dev->xmit_lock spinlock. Synchronization: dev->xmit_lock spinlock.
When the driver sets NETIF_F_LLTX in dev->features this will be When the driver sets NETIF_F_LLTX in dev->features this will be
called without holding xmit_lock. In this case the driver called without holding xmit_lock. In this case the driver
has to lock by itself when needed. It is recommended to use a try lock has to execute it's transmission routine in a completely lockless
for this and return -1 when the spin lock fails. manner. It is recommended only for queueless devices such
The locking there should also properly protect against loopback and tunnels.
set_multicast_list
Context: BHs disabled Context: BHs disabled
Notes: netif_queue_stopped() is guaranteed false Notes: netif_queue_stopped() is guaranteed false
Return codes: Return codes:
...@@ -56,8 +55,6 @@ dev->hard_start_xmit: ...@@ -56,8 +55,6 @@ dev->hard_start_xmit:
o NETDEV_TX_BUSY Cannot transmit packet, try later o NETDEV_TX_BUSY Cannot transmit packet, try later
Usually a bug, means queue start/stop flow control is broken in Usually a bug, means queue start/stop flow control is broken in
the driver. Note: the driver must NOT put the skb in its DMA ring. the driver. Note: the driver must NOT put the skb in its DMA ring.
o NETDEV_TX_LOCKED Locking failed, please retry quickly.
Only valid when NETIF_F_LLTX is set.
dev->tx_timeout: dev->tx_timeout:
Synchronization: dev->xmit_lock spinlock. Synchronization: dev->xmit_lock spinlock.
......
...@@ -104,10 +104,10 @@ struct ipoib_buf { ...@@ -104,10 +104,10 @@ struct ipoib_buf {
}; };
/* /*
* Device private locking: tx_lock protects members used in TX fast * Device private locking: netdev->xmit_lock protects members used
* path (and we use LLTX so upper layers don't do extra locking). * in TX fast path.
* lock protects everything else. lock nests inside of tx_lock (ie * lock protects everything else. lock nests inside of xmit_lock (ie
* tx_lock must be acquired first if needed). * xmit_lock must be acquired first if needed).
*/ */
struct ipoib_dev_priv { struct ipoib_dev_priv {
spinlock_t lock; spinlock_t lock;
...@@ -150,7 +150,6 @@ struct ipoib_dev_priv { ...@@ -150,7 +150,6 @@ struct ipoib_dev_priv {
struct ipoib_buf *rx_ring; struct ipoib_buf *rx_ring;
spinlock_t tx_lock;
struct ipoib_buf *tx_ring; struct ipoib_buf *tx_ring;
unsigned tx_head; unsigned tx_head;
unsigned tx_tail; unsigned tx_tail;
......
...@@ -247,12 +247,12 @@ static void ipoib_ib_handle_wc(struct net_device *dev, ...@@ -247,12 +247,12 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
spin_lock_irqsave(&priv->tx_lock, flags); spin_lock_irqsave(&dev->xmit_lock, flags);
++priv->tx_tail; ++priv->tx_tail;
if (netif_queue_stopped(dev) && if (netif_queue_stopped(dev) &&
priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2) priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
netif_wake_queue(dev); netif_wake_queue(dev);
spin_unlock_irqrestore(&priv->tx_lock, flags); spin_unlock_irqrestore(&dev->xmit_lock, flags);
if (wc->status != IB_WC_SUCCESS && if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) wc->status != IB_WC_WR_FLUSH_ERR)
......
...@@ -411,7 +411,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -411,7 +411,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
/* /*
* We can only be called from ipoib_start_xmit, so we're * We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags. * inside dev->xmit_lock -- no need to save/restore flags.
*/ */
spin_lock(&priv->lock); spin_lock(&priv->lock);
...@@ -483,7 +483,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -483,7 +483,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
/* /*
* We can only be called from ipoib_start_xmit, so we're * We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags. * inside dev->xmit_lock -- no need to save/restore flags.
*/ */
spin_lock(&priv->lock); spin_lock(&priv->lock);
...@@ -526,27 +526,11 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -526,27 +526,11 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_unlock(&priv->lock); spin_unlock(&priv->lock);
} }
/* Called with dev->xmit_lock held and IRQs disabled. */
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh; struct ipoib_neigh *neigh;
unsigned long flags;
local_irq_save(flags);
if (!spin_trylock(&priv->tx_lock)) {
local_irq_restore(flags);
return NETDEV_TX_LOCKED;
}
/*
* Check if our queue is stopped. Since we have the LLTX bit
* set, we can't rely on netif_stop_queue() preventing our
* xmit function from being called with a full queue.
*/
if (unlikely(netif_queue_stopped(dev))) {
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_BUSY;
}
if (skb->dst && skb->dst->neighbour) { if (skb->dst && skb->dst->neighbour) {
if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
...@@ -601,7 +585,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -601,7 +585,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
} }
out: out:
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -797,7 +780,7 @@ static void ipoib_setup(struct net_device *dev) ...@@ -797,7 +780,7 @@ static void ipoib_setup(struct net_device *dev)
dev->addr_len = INFINIBAND_ALEN; dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND; dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = IPOIB_TX_RING_SIZE * 2; dev->tx_queue_len = IPOIB_TX_RING_SIZE * 2;
dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX; dev->features = NETIF_F_VLAN_CHALLENGED;
/* MTU will be reset when mcast join happens */ /* MTU will be reset when mcast join happens */
dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
...@@ -812,7 +795,6 @@ static void ipoib_setup(struct net_device *dev) ...@@ -812,7 +795,6 @@ static void ipoib_setup(struct net_device *dev)
priv->dev = dev; priv->dev = dev;
spin_lock_init(&priv->lock); spin_lock_init(&priv->lock);
spin_lock_init(&priv->tx_lock);
init_MUTEX(&priv->mcast_mutex); init_MUTEX(&priv->mcast_mutex);
init_MUTEX(&priv->vlan_mutex); init_MUTEX(&priv->vlan_mutex);
......
...@@ -209,7 +209,6 @@ struct e1000_adapter { ...@@ -209,7 +209,6 @@ struct e1000_adapter {
/* TX */ /* TX */
struct e1000_desc_ring tx_ring; struct e1000_desc_ring tx_ring;
spinlock_t tx_lock;
uint32_t txd_cmd; uint32_t txd_cmd;
uint32_t tx_int_delay; uint32_t tx_int_delay;
uint32_t tx_abs_int_delay; uint32_t tx_abs_int_delay;
......
...@@ -291,7 +291,9 @@ e1000_up(struct e1000_adapter *adapter) ...@@ -291,7 +291,9 @@ e1000_up(struct e1000_adapter *adapter)
e1000_phy_reset(&adapter->hw); e1000_phy_reset(&adapter->hw);
} }
spin_lock_irq(&netdev->xmit_lock);
e1000_set_multi(netdev); e1000_set_multi(netdev);
spin_unlock_irq(&netdev->xmit_lock);
e1000_restore_vlan(adapter); e1000_restore_vlan(adapter);
...@@ -520,9 +522,6 @@ e1000_probe(struct pci_dev *pdev, ...@@ -520,9 +522,6 @@ e1000_probe(struct pci_dev *pdev,
if(pci_using_dac) if(pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HIGHDMA;
/* hard_start_xmit is safe against parallel locking */
netdev->features |= NETIF_F_LLTX;
/* before reading the EEPROM, reset the controller to /* before reading the EEPROM, reset the controller to
* put the device in a known good starting state */ * put the device in a known good starting state */
...@@ -732,7 +731,6 @@ e1000_sw_init(struct e1000_adapter *adapter) ...@@ -732,7 +731,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
atomic_set(&adapter->irq_sem, 1); atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tx_lock);
return 0; return 0;
} }
...@@ -1293,6 +1291,8 @@ e1000_set_mac(struct net_device *netdev, void *p) ...@@ -1293,6 +1291,8 @@ e1000_set_mac(struct net_device *netdev, void *p)
* list or the network interface flags are updated. This routine is * list or the network interface flags are updated. This routine is
* responsible for configuring the hardware for proper multicast, * responsible for configuring the hardware for proper multicast,
* promiscuous mode, and all-multi behavior. * promiscuous mode, and all-multi behavior.
*
* Called with netdev->xmit_lock held and IRQs disabled.
**/ **/
static void static void
...@@ -1304,12 +1304,9 @@ e1000_set_multi(struct net_device *netdev) ...@@ -1304,12 +1304,9 @@ e1000_set_multi(struct net_device *netdev)
uint32_t rctl; uint32_t rctl;
uint32_t hash_value; uint32_t hash_value;
int i; int i;
unsigned long flags;
/* Check for Promiscuous and All Multicast modes */ /* Check for Promiscuous and All Multicast modes */
spin_lock_irqsave(&adapter->tx_lock, flags);
rctl = E1000_READ_REG(hw, RCTL); rctl = E1000_READ_REG(hw, RCTL);
if(netdev->flags & IFF_PROMISC) { if(netdev->flags & IFF_PROMISC) {
...@@ -1358,8 +1355,6 @@ e1000_set_multi(struct net_device *netdev) ...@@ -1358,8 +1355,6 @@ e1000_set_multi(struct net_device *netdev)
if(hw->mac_type == e1000_82542_rev2_0) if(hw->mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter); e1000_leave_82542_rst(adapter);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
} }
/* Need to wait a few seconds after link up to get diagnostic information from /* Need to wait a few seconds after link up to get diagnostic information from
...@@ -1786,6 +1781,8 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) ...@@ -1786,6 +1781,8 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
} }
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
/* Called with dev->xmit_lock held and interrupts disabled. */
static int static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{ {
...@@ -1794,7 +1791,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1794,7 +1791,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0; unsigned int tx_flags = 0;
unsigned int len = skb->len; unsigned int len = skb->len;
unsigned long flags;
unsigned int nr_frags = 0; unsigned int nr_frags = 0;
unsigned int mss = 0; unsigned int mss = 0;
int count = 0; int count = 0;
...@@ -1838,18 +1834,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1838,18 +1834,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(adapter->pcix_82544) if(adapter->pcix_82544)
count += nr_frags; count += nr_frags;
local_irq_save(flags);
if (!spin_trylock(&adapter->tx_lock)) {
/* Collision - tell upper layer to requeue */
local_irq_restore(flags);
return NETDEV_TX_LOCKED;
}
/* need: count + 2 desc gap to keep tail from touching /* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */ * head, otherwise try next time */
if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) { if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
netif_stop_queue(netdev); netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
...@@ -1857,7 +1845,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1857,7 +1845,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev); netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies); mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
} }
...@@ -1884,7 +1871,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1884,7 +1871,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2)) if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
netif_stop_queue(netdev); netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -2234,13 +2220,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) ...@@ -2234,13 +2220,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_ring->next_to_clean = i; tx_ring->next_to_clean = i;
spin_lock(&adapter->tx_lock); spin_lock(&netdev->xmit_lock);
if(unlikely(cleaned && netif_queue_stopped(netdev) && if(unlikely(cleaned && netif_queue_stopped(netdev) &&
netif_carrier_ok(netdev))) netif_carrier_ok(netdev)))
netif_wake_queue(netdev); netif_wake_queue(netdev);
spin_unlock(&adapter->tx_lock); spin_unlock(&netdev->xmit_lock);
return cleaned; return cleaned;
} }
...@@ -2819,7 +2805,10 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state) ...@@ -2819,7 +2805,10 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
if(wufc) { if(wufc) {
e1000_setup_rctl(adapter); e1000_setup_rctl(adapter);
spin_lock_irq(&netdev->xmit_lock);
e1000_set_multi(netdev); e1000_set_multi(netdev);
spin_unlock_irq(&netdev->xmit_lock);
/* turn on all-multi mode if wake on multicast is enabled */ /* turn on all-multi mode if wake on multicast is enabled */
if(adapter->wol & E1000_WUFC_MC) { if(adapter->wol & E1000_WUFC_MC) {
......
...@@ -835,9 +835,9 @@ static int gem_poll(struct net_device *dev, int *budget) ...@@ -835,9 +835,9 @@ static int gem_poll(struct net_device *dev, int *budget)
} }
/* Run TX completion thread */ /* Run TX completion thread */
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
gem_tx(dev, gp, gp->status); gem_tx(dev, gp, gp->status);
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irqrestore(&gp->lock, flags); spin_unlock_irqrestore(&gp->lock, flags);
...@@ -932,12 +932,12 @@ static void gem_tx_timeout(struct net_device *dev) ...@@ -932,12 +932,12 @@ static void gem_tx_timeout(struct net_device *dev)
readl(gp->regs + MAC_RXCFG)); readl(gp->regs + MAC_RXCFG));
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
gp->reset_task_pending = 2; gp->reset_task_pending = 2;
schedule_work(&gp->reset_task); schedule_work(&gp->reset_task);
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
} }
...@@ -955,7 +955,6 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -955,7 +955,6 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct gem *gp = dev->priv; struct gem *gp = dev->priv;
int entry; int entry;
u64 ctrl; u64 ctrl;
unsigned long flags;
ctrl = 0; ctrl = 0;
if (skb->ip_summed == CHECKSUM_HW) { if (skb->ip_summed == CHECKSUM_HW) {
...@@ -969,17 +968,9 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -969,17 +968,9 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
(csum_stuff_off << 21)); (csum_stuff_off << 21));
} }
local_irq_save(flags);
if (!spin_trylock(&gp->tx_lock)) {
/* Tell upper layer to requeue */
local_irq_restore(flags);
return NETDEV_TX_LOCKED;
}
/* This is a hard error, log it. */ /* This is a hard error, log it. */
if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev); netif_stop_queue(dev);
spin_unlock_irqrestore(&gp->tx_lock, flags);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name); dev->name);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
...@@ -1066,7 +1057,6 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1066,7 +1057,6 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name, entry, skb->len); dev->name, entry, skb->len);
mb(); mb();
writel(gp->tx_new, gp->regs + TXDMA_KICK); writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irqrestore(&gp->tx_lock, flags);
dev->trans_start = jiffies; dev->trans_start = jiffies;
...@@ -1097,11 +1087,11 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) ...@@ -1097,11 +1087,11 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
} }
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
dev->mtu = new_mtu; dev->mtu = new_mtu;
gp->reset_task_pending = 1; gp->reset_task_pending = 1;
schedule_work(&gp->reset_task); schedule_work(&gp->reset_task);
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
flush_scheduled_work(); flush_scheduled_work();
...@@ -1111,7 +1101,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) ...@@ -1111,7 +1101,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
#define STOP_TRIES 32 #define STOP_TRIES 32
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_stop(struct gem *gp) static void gem_stop(struct gem *gp)
{ {
int limit; int limit;
...@@ -1137,7 +1127,7 @@ static void gem_stop(struct gem *gp) ...@@ -1137,7 +1127,7 @@ static void gem_stop(struct gem *gp)
printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_start_dma(struct gem *gp) static void gem_start_dma(struct gem *gp)
{ {
unsigned long val; unsigned long val;
...@@ -1162,7 +1152,7 @@ static void gem_start_dma(struct gem *gp) ...@@ -1162,7 +1152,7 @@ static void gem_start_dma(struct gem *gp)
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
// XXX dbl check what that function should do when called on PCS PHY // XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{ {
...@@ -1249,7 +1239,7 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) ...@@ -1249,7 +1239,7 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
/* A link-up condition has occurred, initialize and enable the /* A link-up condition has occurred, initialize and enable the
* rest of the chip. * rest of the chip.
* *
* Must be invoked under gp->lock and gp->tx_lock. * Must be invoked under gp->lock and dev->xmit_lock.
*/ */
static int gem_set_link_modes(struct gem *gp) static int gem_set_link_modes(struct gem *gp)
{ {
...@@ -1356,7 +1346,7 @@ static int gem_set_link_modes(struct gem *gp) ...@@ -1356,7 +1346,7 @@ static int gem_set_link_modes(struct gem *gp)
return 0; return 0;
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static int gem_mdio_link_not_up(struct gem *gp) static int gem_mdio_link_not_up(struct gem *gp)
{ {
switch (gp->lstate) { switch (gp->lstate) {
...@@ -1414,7 +1404,7 @@ static void gem_reset_task(void *data) ...@@ -1414,7 +1404,7 @@ static void gem_reset_task(void *data)
netif_poll_disable(gp->dev); netif_poll_disable(gp->dev);
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
if (gp->hw_running && gp->opened) { if (gp->hw_running && gp->opened) {
netif_stop_queue(gp->dev); netif_stop_queue(gp->dev);
...@@ -1430,7 +1420,7 @@ static void gem_reset_task(void *data) ...@@ -1430,7 +1420,7 @@ static void gem_reset_task(void *data)
} }
gp->reset_task_pending = 0; gp->reset_task_pending = 0;
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
netif_poll_enable(gp->dev); netif_poll_enable(gp->dev);
} }
...@@ -1444,7 +1434,7 @@ static void gem_link_timer(unsigned long data) ...@@ -1444,7 +1434,7 @@ static void gem_link_timer(unsigned long data)
return; return;
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
/* If the link of task is still pending, we just /* If the link of task is still pending, we just
* reschedule the link timer * reschedule the link timer
...@@ -1514,11 +1504,11 @@ static void gem_link_timer(unsigned long data) ...@@ -1514,11 +1504,11 @@ static void gem_link_timer(unsigned long data)
restart: restart:
mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock: out_unlock:
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_clean_rings(struct gem *gp) static void gem_clean_rings(struct gem *gp)
{ {
struct gem_init_block *gb = gp->init_block; struct gem_init_block *gb = gp->init_block;
...@@ -1569,7 +1559,7 @@ static void gem_clean_rings(struct gem *gp) ...@@ -1569,7 +1559,7 @@ static void gem_clean_rings(struct gem *gp)
} }
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_init_rings(struct gem *gp) static void gem_init_rings(struct gem *gp)
{ {
struct gem_init_block *gb = gp->init_block; struct gem_init_block *gb = gp->init_block;
...@@ -1619,7 +1609,7 @@ static void gem_init_rings(struct gem *gp) ...@@ -1619,7 +1609,7 @@ static void gem_init_rings(struct gem *gp)
wmb(); wmb();
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_init_phy(struct gem *gp) static void gem_init_phy(struct gem *gp)
{ {
u32 mifcfg; u32 mifcfg;
...@@ -1757,7 +1747,7 @@ static void gem_init_phy(struct gem *gp) ...@@ -1757,7 +1747,7 @@ static void gem_init_phy(struct gem *gp)
} }
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_init_dma(struct gem *gp) static void gem_init_dma(struct gem *gp)
{ {
u64 desc_dma = (u64) gp->gblock_dvma; u64 desc_dma = (u64) gp->gblock_dvma;
...@@ -1795,7 +1785,7 @@ static void gem_init_dma(struct gem *gp) ...@@ -1795,7 +1785,7 @@ static void gem_init_dma(struct gem *gp)
gp->regs + RXDMA_BLANK); gp->regs + RXDMA_BLANK);
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under dev->xmit_lock. */
static u32 static u32
gem_setup_multicast(struct gem *gp) gem_setup_multicast(struct gem *gp)
{ {
...@@ -1838,7 +1828,7 @@ gem_setup_multicast(struct gem *gp) ...@@ -1838,7 +1828,7 @@ gem_setup_multicast(struct gem *gp)
return rxcfg; return rxcfg;
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_init_mac(struct gem *gp) static void gem_init_mac(struct gem *gp)
{ {
unsigned char *e = &gp->dev->dev_addr[0]; unsigned char *e = &gp->dev->dev_addr[0];
...@@ -1916,7 +1906,7 @@ static void gem_init_mac(struct gem *gp) ...@@ -1916,7 +1906,7 @@ static void gem_init_mac(struct gem *gp)
writel(0xffffffff, gp->regs + MAC_MCMASK); writel(0xffffffff, gp->regs + MAC_MCMASK);
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_init_pause_thresholds(struct gem *gp) static void gem_init_pause_thresholds(struct gem *gp)
{ {
u32 cfg; u32 cfg;
...@@ -2052,7 +2042,7 @@ static int gem_check_invariants(struct gem *gp) ...@@ -2052,7 +2042,7 @@ static int gem_check_invariants(struct gem *gp)
return 0; return 0;
} }
/* Must be invoked under gp->lock and gp->tx_lock. */ /* Must be invoked under gp->lock and dev->xmit_lock. */
static void gem_init_hw(struct gem *gp, int restart_link) static void gem_init_hw(struct gem *gp, int restart_link)
{ {
/* On Apple's gmac, I initialize the PHY only after /* On Apple's gmac, I initialize the PHY only after
...@@ -2150,11 +2140,11 @@ static void gem_stop_phy(struct gem *gp) ...@@ -2150,11 +2140,11 @@ static void gem_stop_phy(struct gem *gp)
if (!gp->wake_on_lan) { if (!gp->wake_on_lan) {
spin_lock_irqsave(&gp->lock, flags); spin_lock_irqsave(&gp->lock, flags);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
gem_stop(gp); gem_stop(gp);
writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irqrestore(&gp->lock, flags); spin_unlock_irqrestore(&gp->lock, flags);
} }
...@@ -2202,9 +2192,9 @@ static void gem_shutdown(struct gem *gp) ...@@ -2202,9 +2192,9 @@ static void gem_shutdown(struct gem *gp)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&gp->lock, flags); spin_lock_irqsave(&gp->lock, flags);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
gem_stop(gp); gem_stop(gp);
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irqrestore(&gp->lock, flags); spin_unlock_irqrestore(&gp->lock, flags);
} }
} }
...@@ -2265,9 +2255,9 @@ static int gem_open(struct net_device *dev) ...@@ -2265,9 +2255,9 @@ static int gem_open(struct net_device *dev)
/* Reset the chip */ /* Reset the chip */
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
gem_stop(gp); gem_stop(gp);
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
gp->hw_running = 1; gp->hw_running = 1;
...@@ -2281,7 +2271,7 @@ static int gem_open(struct net_device *dev) ...@@ -2281,7 +2271,7 @@ static int gem_open(struct net_device *dev)
printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
#ifdef CONFIG_PPC_PMAC #ifdef CONFIG_PPC_PMAC
if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE) if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
gem_apple_powerdown(gp); gem_apple_powerdown(gp);
...@@ -2290,14 +2280,14 @@ static int gem_open(struct net_device *dev) ...@@ -2290,14 +2280,14 @@ static int gem_open(struct net_device *dev)
gp->pm_timer.expires = jiffies + 10*HZ; gp->pm_timer.expires = jiffies + 10*HZ;
add_timer(&gp->pm_timer); add_timer(&gp->pm_timer);
up(&gp->pm_sem); up(&gp->pm_sem);
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
return -EAGAIN; return -EAGAIN;
} }
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
/* Allocate & setup ring buffers */ /* Allocate & setup ring buffers */
gem_init_rings(gp); gem_init_rings(gp);
...@@ -2307,7 +2297,7 @@ static int gem_open(struct net_device *dev) ...@@ -2307,7 +2297,7 @@ static int gem_open(struct net_device *dev)
gp->opened = 1; gp->opened = 1;
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
up(&gp->pm_sem); up(&gp->pm_sem);
...@@ -2328,7 +2318,7 @@ static int gem_close(struct net_device *dev) ...@@ -2328,7 +2318,7 @@ static int gem_close(struct net_device *dev)
/* Stop traffic, mark us closed */ /* Stop traffic, mark us closed */
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
gp->opened = 0; gp->opened = 0;
...@@ -2343,7 +2333,7 @@ static int gem_close(struct net_device *dev) ...@@ -2343,7 +2333,7 @@ static int gem_close(struct net_device *dev)
/* Bye, the pm timer will finish the job */ /* Bye, the pm timer will finish the job */
free_irq(gp->pdev->irq, (void *) dev); free_irq(gp->pdev->irq, (void *) dev);
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
/* Fire the PM timer that will shut us down in about 10 seconds */ /* Fire the PM timer that will shut us down in about 10 seconds */
...@@ -2374,7 +2364,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state) ...@@ -2374,7 +2364,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state)
/* If the driver is opened, we stop the DMA */ /* If the driver is opened, we stop the DMA */
if (gp->opened) { if (gp->opened) {
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
/* Stop traffic, mark us closed */ /* Stop traffic, mark us closed */
netif_device_detach(dev); netif_device_detach(dev);
...@@ -2385,7 +2375,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state) ...@@ -2385,7 +2375,7 @@ static int gem_suspend(struct pci_dev *pdev, u32 state)
/* Get rid of ring buffers */ /* Get rid of ring buffers */
gem_clean_rings(gp); gem_clean_rings(gp);
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
...@@ -2419,14 +2409,14 @@ static int gem_resume(struct pci_dev *pdev) ...@@ -2419,14 +2409,14 @@ static int gem_resume(struct pci_dev *pdev)
} }
#endif /* CONFIG_PPC_PMAC */ #endif /* CONFIG_PPC_PMAC */
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&gp->dev->xmit_lock);
gem_stop(gp); gem_stop(gp);
gp->hw_running = 1; gp->hw_running = 1;
gem_init_rings(gp); gem_init_rings(gp);
gem_init_hw(gp, 1); gem_init_hw(gp, 1);
spin_unlock(&gp->tx_lock); spin_unlock(&gp->dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
netif_device_attach(dev); netif_device_attach(dev);
...@@ -2447,7 +2437,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) ...@@ -2447,7 +2437,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
struct net_device_stats *stats = &gp->net_stats; struct net_device_stats *stats = &gp->net_stats;
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
if (gp->hw_running) { if (gp->hw_running) {
stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
...@@ -2467,12 +2457,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) ...@@ -2467,12 +2457,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
writel(0, gp->regs + MAC_LCOLL); writel(0, gp->regs + MAC_LCOLL);
} }
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
return &gp->net_stats; return &gp->net_stats;
} }
/* Called with dev->xmit_lock held and IRQs disabled. */
static void gem_set_multicast(struct net_device *dev) static void gem_set_multicast(struct net_device *dev)
{ {
struct gem *gp = dev->priv; struct gem *gp = dev->priv;
...@@ -2482,9 +2473,6 @@ static void gem_set_multicast(struct net_device *dev) ...@@ -2482,9 +2473,6 @@ static void gem_set_multicast(struct net_device *dev)
if (!gp->hw_running) if (!gp->hw_running)
return; return;
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
netif_stop_queue(dev); netif_stop_queue(dev);
rxcfg = readl(gp->regs + MAC_RXCFG); rxcfg = readl(gp->regs + MAC_RXCFG);
...@@ -2507,9 +2495,6 @@ static void gem_set_multicast(struct net_device *dev) ...@@ -2507,9 +2495,6 @@ static void gem_set_multicast(struct net_device *dev)
writel(rxcfg, gp->regs + MAC_RXCFG); writel(rxcfg, gp->regs + MAC_RXCFG);
netif_wake_queue(dev); netif_wake_queue(dev);
spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
} }
static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
...@@ -2540,7 +2525,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ...@@ -2540,7 +2525,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
/* Return current PHY settings */ /* Return current PHY settings */
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
cmd->autoneg = gp->want_autoneg; cmd->autoneg = gp->want_autoneg;
cmd->speed = gp->phy_mii.speed; cmd->speed = gp->phy_mii.speed;
cmd->duplex = gp->phy_mii.duplex; cmd->duplex = gp->phy_mii.duplex;
...@@ -2552,7 +2537,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ...@@ -2552,7 +2537,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*/ */
if (cmd->advertising == 0) if (cmd->advertising == 0)
cmd->advertising = cmd->supported; cmd->advertising = cmd->supported;
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
} else { // XXX PCS ? } else { // XXX PCS ?
cmd->supported = cmd->supported =
...@@ -2592,9 +2577,9 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ...@@ -2592,9 +2577,9 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
/* Apply settings and restart link process. */ /* Apply settings and restart link process. */
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
gem_begin_auto_negotiation(gp, cmd); gem_begin_auto_negotiation(gp, cmd);
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
return 0; return 0;
...@@ -2609,9 +2594,9 @@ static int gem_nway_reset(struct net_device *dev) ...@@ -2609,9 +2594,9 @@ static int gem_nway_reset(struct net_device *dev)
/* Restart link process. */ /* Restart link process. */
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
gem_begin_auto_negotiation(gp, NULL); gem_begin_auto_negotiation(gp, NULL);
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
return 0; return 0;
...@@ -2863,7 +2848,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev, ...@@ -2863,7 +2848,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gp->msg_enable = DEFAULT_MSG; gp->msg_enable = DEFAULT_MSG;
spin_lock_init(&gp->lock); spin_lock_init(&gp->lock);
spin_lock_init(&gp->tx_lock);
init_MUTEX(&gp->pm_sem); init_MUTEX(&gp->pm_sem);
init_timer(&gp->link_timer); init_timer(&gp->link_timer);
...@@ -2899,9 +2883,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev, ...@@ -2899,9 +2883,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gem_apple_powerup(gp); gem_apple_powerup(gp);
#endif #endif
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
gem_stop(gp); gem_stop(gp);
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
/* Fill up the mii_phy structure (even if we won't use it) */ /* Fill up the mii_phy structure (even if we won't use it) */
...@@ -2967,11 +2951,11 @@ static int __devinit gem_init_one(struct pci_dev *pdev, ...@@ -2967,11 +2951,11 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
/* Detect & init PHY, start autoneg */ /* Detect & init PHY, start autoneg */
spin_lock_irq(&gp->lock); spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock); spin_lock(&dev->xmit_lock);
gp->hw_running = 1; gp->hw_running = 1;
gem_init_phy(gp); gem_init_phy(gp);
gem_begin_auto_negotiation(gp, NULL); gem_begin_auto_negotiation(gp, NULL);
spin_unlock(&gp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&gp->lock); spin_unlock_irq(&gp->lock);
if (gp->phy_type == phy_mii_mdio0 || if (gp->phy_type == phy_mii_mdio0 ||
...@@ -2982,7 +2966,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, ...@@ -2982,7 +2966,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev); pci_set_drvdata(pdev, dev);
/* GEM can do it all... */ /* GEM can do it all... */
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
if (pci_using_dac) if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;
......
...@@ -953,7 +953,6 @@ enum link_state { ...@@ -953,7 +953,6 @@ enum link_state {
struct gem { struct gem {
spinlock_t lock; spinlock_t lock;
spinlock_t tx_lock;
void __iomem *regs; void __iomem *regs;
int rx_new, rx_old; int rx_new, rx_old;
int tx_new, tx_old; int tx_new, tx_old;
......
...@@ -2816,9 +2816,9 @@ static int tg3_poll(struct net_device *netdev, int *budget) ...@@ -2816,9 +2816,9 @@ static int tg3_poll(struct net_device *netdev, int *budget)
/* run TX completion thread */ /* run TX completion thread */
if (sblk->idx[0].tx_consumer != tp->tx_cons) { if (sblk->idx[0].tx_consumer != tp->tx_cons) {
spin_lock(&tp->tx_lock); spin_lock(&netdev->xmit_lock);
tg3_tx(tp); tg3_tx(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&netdev->xmit_lock);
} }
spin_unlock_irqrestore(&tp->lock, flags); spin_unlock_irqrestore(&tp->lock, flags);
...@@ -2939,7 +2939,7 @@ static void tg3_reset_task(void *_data) ...@@ -2939,7 +2939,7 @@ static void tg3_reset_task(void *_data)
tg3_netif_stop(tp); tg3_netif_stop(tp);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&tp->dev->xmit_lock);
restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
...@@ -2949,7 +2949,7 @@ static void tg3_reset_task(void *_data) ...@@ -2949,7 +2949,7 @@ static void tg3_reset_task(void *_data)
tg3_netif_start(tp); tg3_netif_start(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&tp->dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
if (restart_timer) if (restart_timer)
...@@ -3048,6 +3048,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) ...@@ -3048,6 +3048,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
(base + len + 8 < base)); (base + len + 8 < base));
} }
/* dev->xmit_lock is held and IRQs are disabled. */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct tg3 *tp = netdev_priv(dev); struct tg3 *tp = netdev_priv(dev);
...@@ -3055,39 +3056,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3055,39 +3056,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int i; unsigned int i;
u32 len, entry, base_flags, mss; u32 len, entry, base_flags, mss;
int would_hit_hwbug; int would_hit_hwbug;
unsigned long flags;
len = skb_headlen(skb); len = skb_headlen(skb);
/* No BH disabling for tx_lock here. We are running in BH disabled
* context and TX reclaim runs via tp->poll inside of a software
* interrupt. Rejoice!
*
* Actually, things are not so simple. If we are to take a hw
* IRQ here, we can deadlock, consider:
*
* CPU1 CPU2
* tg3_start_xmit
* take tp->tx_lock
* tg3_timer
* take tp->lock
* tg3_interrupt
* spin on tp->lock
* spin on tp->tx_lock
*
* So we really do need to disable interrupts when taking
* tx_lock here.
*/
local_irq_save(flags);
if (!spin_trylock(&tp->tx_lock)) {
local_irq_restore(flags);
return NETDEV_TX_LOCKED;
}
/* This is a hard error, log it. */ /* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
netif_stop_queue(dev); netif_stop_queue(dev);
spin_unlock_irqrestore(&tp->tx_lock, flags);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name); dev->name);
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
...@@ -3224,7 +3198,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3224,7 +3198,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry, len, entry, len,
last_plus_one, last_plus_one,
&start, mss)) &start, mss))
goto out_unlock; goto out;
entry = start; entry = start;
} }
...@@ -3236,9 +3210,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3236,9 +3210,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev); netif_stop_queue(dev);
out_unlock: out:
mmiowb(); mmiowb();
spin_unlock_irqrestore(&tp->tx_lock, flags);
dev->trans_start = jiffies; dev->trans_start = jiffies;
...@@ -3273,7 +3246,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) ...@@ -3273,7 +3246,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_stop(tp); tg3_netif_stop(tp);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tg3_halt(tp); tg3_halt(tp);
...@@ -3283,7 +3256,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) ...@@ -3283,7 +3256,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp); tg3_netif_start(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
return 0; return 0;
...@@ -5574,7 +5547,7 @@ static void tg3_timer(unsigned long __opaque) ...@@ -5574,7 +5547,7 @@ static void tg3_timer(unsigned long __opaque)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&tp->lock, flags); spin_lock_irqsave(&tp->lock, flags);
spin_lock(&tp->tx_lock); spin_lock(&tp->dev->xmit_lock);
/* All of this garbage is because when using non-tagged /* All of this garbage is because when using non-tagged
* IRQ status the mailbox/status_block protocol the chip * IRQ status the mailbox/status_block protocol the chip
...@@ -5590,7 +5563,7 @@ static void tg3_timer(unsigned long __opaque) ...@@ -5590,7 +5563,7 @@ static void tg3_timer(unsigned long __opaque)
if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
spin_unlock(&tp->tx_lock); spin_unlock(&tp->dev->xmit_lock);
spin_unlock_irqrestore(&tp->lock, flags); spin_unlock_irqrestore(&tp->lock, flags);
schedule_work(&tp->reset_task); schedule_work(&tp->reset_task);
return; return;
...@@ -5659,7 +5632,7 @@ static void tg3_timer(unsigned long __opaque) ...@@ -5659,7 +5632,7 @@ static void tg3_timer(unsigned long __opaque)
tp->asf_counter = tp->asf_multiplier; tp->asf_counter = tp->asf_multiplier;
} }
spin_unlock(&tp->tx_lock); spin_unlock(&tp->dev->xmit_lock);
spin_unlock_irqrestore(&tp->lock, flags); spin_unlock_irqrestore(&tp->lock, flags);
tp->timer.expires = jiffies + tp->timer_offset; tp->timer.expires = jiffies + tp->timer_offset;
...@@ -5672,12 +5645,12 @@ static int tg3_open(struct net_device *dev) ...@@ -5672,12 +5645,12 @@ static int tg3_open(struct net_device *dev)
int err; int err;
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tg3_disable_ints(tp); tg3_disable_ints(tp);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
/* The placement of this call is tied /* The placement of this call is tied
...@@ -5696,7 +5669,7 @@ static int tg3_open(struct net_device *dev) ...@@ -5696,7 +5669,7 @@ static int tg3_open(struct net_device *dev)
} }
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
err = tg3_init_hw(tp); err = tg3_init_hw(tp);
if (err) { if (err) {
...@@ -5716,7 +5689,7 @@ static int tg3_open(struct net_device *dev) ...@@ -5716,7 +5689,7 @@ static int tg3_open(struct net_device *dev)
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
} }
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
if (err) { if (err) {
...@@ -5726,11 +5699,11 @@ static int tg3_open(struct net_device *dev) ...@@ -5726,11 +5699,11 @@ static int tg3_open(struct net_device *dev)
} }
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tg3_enable_ints(tp); tg3_enable_ints(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
netif_start_queue(dev); netif_start_queue(dev);
...@@ -5978,7 +5951,7 @@ static int tg3_close(struct net_device *dev) ...@@ -5978,7 +5951,7 @@ static int tg3_close(struct net_device *dev)
del_timer_sync(&tp->timer); del_timer_sync(&tp->timer);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
#if 0 #if 0
tg3_dump_state(tp); tg3_dump_state(tp);
#endif #endif
...@@ -5992,7 +5965,7 @@ static int tg3_close(struct net_device *dev) ...@@ -5992,7 +5965,7 @@ static int tg3_close(struct net_device *dev)
TG3_FLAG_GOT_SERDES_FLOWCTL); TG3_FLAG_GOT_SERDES_FLOWCTL);
netif_carrier_off(tp->dev); netif_carrier_off(tp->dev);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
free_irq(dev->irq, dev); free_irq(dev->irq, dev);
...@@ -6291,15 +6264,10 @@ static void __tg3_set_rx_mode(struct net_device *dev) ...@@ -6291,15 +6264,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
} }
} }
/* Called with dev->xmit_lock held and IRQs disabled. */
static void tg3_set_rx_mode(struct net_device *dev) static void tg3_set_rx_mode(struct net_device *dev)
{ {
struct tg3 *tp = netdev_priv(dev);
spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock);
__tg3_set_rx_mode(dev); __tg3_set_rx_mode(dev);
spin_unlock(&tp->tx_lock);
spin_unlock_irq(&tp->lock);
} }
#define TG3_REGDUMP_LEN (32 * 1024) #define TG3_REGDUMP_LEN (32 * 1024)
...@@ -6322,7 +6290,7 @@ static void tg3_get_regs(struct net_device *dev, ...@@ -6322,7 +6290,7 @@ static void tg3_get_regs(struct net_device *dev,
memset(p, 0, TG3_REGDUMP_LEN); memset(p, 0, TG3_REGDUMP_LEN);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
#define __GET_REG32(reg) (*(p)++ = tr32(reg)) #define __GET_REG32(reg) (*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len) \ #define GET_REG32_LOOP(base,len) \
...@@ -6372,7 +6340,7 @@ do { p = (u32 *)(orig_p + (reg)); \ ...@@ -6372,7 +6340,7 @@ do { p = (u32 *)(orig_p + (reg)); \
#undef GET_REG32_LOOP #undef GET_REG32_LOOP
#undef GET_REG32_1 #undef GET_REG32_1
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
} }
...@@ -6496,7 +6464,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ...@@ -6496,7 +6464,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} }
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tp->link_config.autoneg = cmd->autoneg; tp->link_config.autoneg = cmd->autoneg;
if (cmd->autoneg == AUTONEG_ENABLE) { if (cmd->autoneg == AUTONEG_ENABLE) {
...@@ -6510,7 +6478,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ...@@ -6510,7 +6478,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} }
tg3_setup_phy(tp, 1); tg3_setup_phy(tp, 1);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
return 0; return 0;
...@@ -6627,7 +6595,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e ...@@ -6627,7 +6595,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
tg3_netif_stop(tp); tg3_netif_stop(tp);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tp->rx_pending = ering->rx_pending; tp->rx_pending = ering->rx_pending;
...@@ -6640,7 +6608,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e ...@@ -6640,7 +6608,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
tg3_halt(tp); tg3_halt(tp);
tg3_init_hw(tp); tg3_init_hw(tp);
tg3_netif_start(tp); tg3_netif_start(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
return 0; return 0;
...@@ -6661,7 +6629,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam ...@@ -6661,7 +6629,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tg3_netif_stop(tp); tg3_netif_stop(tp);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
if (epause->autoneg) if (epause->autoneg)
tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
else else
...@@ -6677,7 +6645,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam ...@@ -6677,7 +6645,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tg3_halt(tp); tg3_halt(tp);
tg3_init_hw(tp); tg3_init_hw(tp);
tg3_netif_start(tp); tg3_netif_start(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
return 0; return 0;
...@@ -6803,14 +6771,14 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) ...@@ -6803,14 +6771,14 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
struct tg3 *tp = netdev_priv(dev); struct tg3 *tp = netdev_priv(dev);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tp->vlgrp = grp; tp->vlgrp = grp;
/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
__tg3_set_rx_mode(dev); __tg3_set_rx_mode(dev);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
} }
...@@ -6819,10 +6787,10 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) ...@@ -6819,10 +6787,10 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct tg3 *tp = netdev_priv(dev); struct tg3 *tp = netdev_priv(dev);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
if (tp->vlgrp) if (tp->vlgrp)
tp->vlgrp->vlan_devices[vid] = NULL; tp->vlgrp->vlan_devices[vid] = NULL;
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
} }
#endif #endif
...@@ -8241,7 +8209,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, ...@@ -8241,7 +8209,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (pci_using_dac) if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED #if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = tg3_vlan_rx_register; dev->vlan_rx_register = tg3_vlan_rx_register;
...@@ -8283,7 +8250,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, ...@@ -8283,7 +8250,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif #endif
spin_lock_init(&tp->lock); spin_lock_init(&tp->lock);
spin_lock_init(&tp->tx_lock);
spin_lock_init(&tp->indirect_lock); spin_lock_init(&tp->indirect_lock);
INIT_WORK(&tp->reset_task, tg3_reset_task, tp); INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
...@@ -8496,23 +8462,23 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state) ...@@ -8496,23 +8462,23 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state)
del_timer_sync(&tp->timer); del_timer_sync(&tp->timer);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tg3_disable_ints(tp); tg3_disable_ints(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
netif_device_detach(dev); netif_device_detach(dev);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tg3_halt(tp); tg3_halt(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
err = tg3_set_power_state(tp, state); err = tg3_set_power_state(tp, state);
if (err) { if (err) {
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tg3_init_hw(tp); tg3_init_hw(tp);
...@@ -8522,7 +8488,7 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state) ...@@ -8522,7 +8488,7 @@ static int tg3_suspend(struct pci_dev *pdev, u32 state)
netif_device_attach(dev); netif_device_attach(dev);
tg3_netif_start(tp); tg3_netif_start(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
} }
...@@ -8547,7 +8513,7 @@ static int tg3_resume(struct pci_dev *pdev) ...@@ -8547,7 +8513,7 @@ static int tg3_resume(struct pci_dev *pdev)
netif_device_attach(dev); netif_device_attach(dev);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&dev->xmit_lock);
tg3_init_hw(tp); tg3_init_hw(tp);
...@@ -8558,7 +8524,7 @@ static int tg3_resume(struct pci_dev *pdev) ...@@ -8558,7 +8524,7 @@ static int tg3_resume(struct pci_dev *pdev)
tg3_netif_start(tp); tg3_netif_start(tp);
spin_unlock(&tp->tx_lock); spin_unlock(&dev->xmit_lock);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
return 0; return 0;
......
...@@ -1980,12 +1980,11 @@ struct tg3 { ...@@ -1980,12 +1980,11 @@ struct tg3 {
* lock: Held during all operations except TX packet * lock: Held during all operations except TX packet
* processing. * processing.
* *
* tx_lock: Held during tg3_start_xmit{,_4gbug} and tg3_tx * dev->xmit_lock: Held during tg3_start_xmit and tg3_tx
* *
* If you want to shut up all asynchronous processing you must * If you want to shut up all asynchronous processing you must
* acquire both locks, 'lock' taken before 'tx_lock'. IRQs must * acquire both locks, 'lock' taken before 'xmit_lock'. IRQs must
* be disabled to take 'lock' but only softirq disabling is * be disabled to take either lock.
* necessary for acquisition of 'tx_lock'.
*/ */
spinlock_t lock; spinlock_t lock;
spinlock_t indirect_lock; spinlock_t indirect_lock;
...@@ -2004,8 +2003,6 @@ struct tg3 { ...@@ -2004,8 +2003,6 @@ struct tg3 {
u32 tx_cons; u32 tx_cons;
u32 tx_pending; u32 tx_pending;
spinlock_t tx_lock;
struct tg3_tx_buffer_desc *tx_ring; struct tg3_tx_buffer_desc *tx_ring;
struct tx_ring_info *tx_buffers; struct tx_ring_info *tx_buffers;
dma_addr_t tx_desc_mapping; dma_addr_t tx_desc_mapping;
......
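The tg3.h comment above defines the new lock ordering: tp->lock is taken with hardware IRQs disabled, and dev->xmit_lock nests inside it. A minimal sketch of a full-quiesce path under that ordering (illustrative only, not part of this patch; it simply mirrors the tg3_suspend hunk earlier in the diff, with a hypothetical wrapper name):

	/* Sketch: stop all asynchronous tg3 processing.  'lock' is taken before
	 * 'xmit_lock', IRQs disabled for both, matching the comment above. */
	static void tg3_quiesce_sketch(struct tg3 *tp, struct net_device *dev)
	{
		spin_lock_irq(&tp->lock);	/* outer lock, disables IRQs */
		spin_lock(&dev->xmit_lock);	/* TX path excluded; IRQs already off */

		tg3_disable_ints(tp);		/* nothing asynchronous can run now */

		spin_unlock(&dev->xmit_lock);
		spin_unlock_irq(&tp->lock);
	}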
...@@ -76,7 +76,6 @@ struct ethtool_ops; ...@@ -76,7 +76,6 @@ struct ethtool_ops;
/* Driver transmit return codes */ /* Driver transmit return codes */
#define NETDEV_TX_OK 0 /* driver took care of packet */ #define NETDEV_TX_OK 0 /* driver took care of packet */
#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
/* /*
* Compute the worst case header length according to the protocols * Compute the worst case header length according to the protocols
...@@ -415,7 +414,7 @@ struct net_device ...@@ -415,7 +414,7 @@ struct net_device
#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ #define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
#define NETIF_F_LLTX 4096 /* LockLess TX */ #define NETIF_F_LLTX 4096 /* Do not grab xmit_lock during ->hard_start_xmit */
/* Called after device is detached from network. */ /* Called after device is detached from network. */
void (*uninit)(struct net_device *dev); void (*uninit)(struct net_device *dev);
...@@ -894,9 +893,11 @@ static inline void __netif_rx_complete(struct net_device *dev) ...@@ -894,9 +893,11 @@ static inline void __netif_rx_complete(struct net_device *dev)
static inline void netif_tx_disable(struct net_device *dev) static inline void netif_tx_disable(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); unsigned long flags;
spin_lock_irqsave(&dev->xmit_lock, flags);
netif_stop_queue(dev); netif_stop_queue(dev);
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irqrestore(&dev->xmit_lock, flags);
} }
/* These functions live elsewhere (drivers/net/net_init.c, but related) */ /* These functions live elsewhere (drivers/net/net_init.c, but related) */
......
...@@ -97,7 +97,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc) ...@@ -97,7 +97,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc); printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n",clip_vcc);
return; return;
} }
spin_lock_bh(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */ spin_lock_irq(&entry->neigh->dev->xmit_lock); /* block clip_start_xmit() */
entry->neigh->used = jiffies; entry->neigh->used = jiffies;
for (walk = &entry->vccs; *walk; walk = &(*walk)->next) for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
if (*walk == clip_vcc) { if (*walk == clip_vcc) {
...@@ -121,7 +121,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc) ...@@ -121,7 +121,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
"0x%p)\n",entry,clip_vcc); "0x%p)\n",entry,clip_vcc);
out: out:
spin_unlock_bh(&entry->neigh->dev->xmit_lock); spin_unlock_irq(&entry->neigh->dev->xmit_lock);
} }
/* The neighbour entry n->lock is held. */ /* The neighbour entry n->lock is held. */
......
...@@ -1190,7 +1190,7 @@ int __skb_linearize(struct sk_buff *skb, int gfp_mask) ...@@ -1190,7 +1190,7 @@ int __skb_linearize(struct sk_buff *skb, int gfp_mask)
#define HARD_TX_LOCK(dev, cpu) { \ #define HARD_TX_LOCK(dev, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \
spin_lock(&dev->xmit_lock); \ spin_lock_irq(&dev->xmit_lock); \
dev->xmit_lock_owner = cpu; \ dev->xmit_lock_owner = cpu; \
} \ } \
} }
...@@ -1198,7 +1198,7 @@ int __skb_linearize(struct sk_buff *skb, int gfp_mask) ...@@ -1198,7 +1198,7 @@ int __skb_linearize(struct sk_buff *skb, int gfp_mask)
#define HARD_TX_UNLOCK(dev) { \ #define HARD_TX_UNLOCK(dev) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \ if ((dev->features & NETIF_F_LLTX) == 0) { \
dev->xmit_lock_owner = -1; \ dev->xmit_lock_owner = -1; \
spin_unlock(&dev->xmit_lock); \ spin_unlock_irq(&dev->xmit_lock); \
} \ } \
} }
......
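The HARD_TX_LOCK/HARD_TX_UNLOCK hunks above show that, for devices without NETIF_F_LLTX, the core now holds dev->xmit_lock with hardware IRQs disabled across ->hard_start_xmit(). A hedged sketch of what that buys a driver; the names struct example_priv, example_tx_ring_full() and example_queue_to_hw() are hypothetical, the usual kernel headers are assumed, and the point is only that the hot path needs no private tx_lock of its own:

	static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);	/* hypothetical private struct */

		/* dev->xmit_lock is already held by the caller, IRQs are off. */
		if (unlikely(example_tx_ring_full(priv))) {
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;	/* must NOT place the skb in the DMA ring */
		}

		example_queue_to_hw(priv, skb);		/* post the skb to the TX ring */
		if (example_tx_ring_full(priv))
			netif_stop_queue(dev);		/* throttle before the ring overflows */

		return NETDEV_TX_OK;
	}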
...@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev) ...@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
void dev_mc_upload(struct net_device *dev) void dev_mc_upload(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
__dev_mc_upload(dev); __dev_mc_upload(dev);
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
} }
/* /*
...@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) ...@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
int err = 0; int err = 0;
struct dev_mc_list *dmi, **dmip; struct dev_mc_list *dmi, **dmip;
spin_lock_bh(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) { for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
/* /*
...@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) ...@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
*/ */
__dev_mc_upload(dev); __dev_mc_upload(dev);
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
return 0; return 0;
} }
} }
err = -ENOENT; err = -ENOENT;
done: done:
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
return err; return err;
} }
...@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) ...@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), GFP_ATOMIC); dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), GFP_ATOMIC);
spin_lock_bh(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) { for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 && if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
dmi->dmi_addrlen == alen) { dmi->dmi_addrlen == alen) {
...@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) ...@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
} }
if ((dmi = dmi1) == NULL) { if ((dmi = dmi1) == NULL) {
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
return -ENOMEM; return -ENOMEM;
} }
memcpy(dmi->dmi_addr, addr, alen); memcpy(dmi->dmi_addr, addr, alen);
...@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) ...@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
__dev_mc_upload(dev); __dev_mc_upload(dev);
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
return 0; return 0;
done: done:
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
if (dmi1) if (dmi1)
kfree(dmi1); kfree(dmi1);
return err; return err;
...@@ -205,7 +205,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) ...@@ -205,7 +205,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
void dev_mc_discard(struct net_device *dev) void dev_mc_discard(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
while (dev->mc_list != NULL) { while (dev->mc_list != NULL) {
struct dev_mc_list *tmp = dev->mc_list; struct dev_mc_list *tmp = dev->mc_list;
...@@ -216,7 +216,7 @@ void dev_mc_discard(struct net_device *dev) ...@@ -216,7 +216,7 @@ void dev_mc_discard(struct net_device *dev)
} }
dev->mc_count = 0; dev->mc_count = 0;
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
...@@ -251,7 +251,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v) ...@@ -251,7 +251,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
struct dev_mc_list *m; struct dev_mc_list *m;
struct net_device *dev = v; struct net_device *dev = v;
spin_lock_bh(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
for (m = dev->mc_list; m; m = m->next) { for (m = dev->mc_list; m; m = m->next) {
int i; int i;
...@@ -263,7 +263,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v) ...@@ -263,7 +263,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
seq_putc(seq, '\n'); seq_putc(seq, '\n');
} }
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
return 0; return 0;
} }
......
...@@ -188,7 +188,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) ...@@ -188,7 +188,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
return; return;
} }
spin_lock(&np->dev->xmit_lock); spin_lock_irq(&np->dev->xmit_lock);
np->dev->xmit_lock_owner = smp_processor_id(); np->dev->xmit_lock_owner = smp_processor_id();
/* /*
...@@ -197,7 +197,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) ...@@ -197,7 +197,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
*/ */
if (netif_queue_stopped(np->dev)) { if (netif_queue_stopped(np->dev)) {
np->dev->xmit_lock_owner = -1; np->dev->xmit_lock_owner = -1;
spin_unlock(&np->dev->xmit_lock); spin_unlock_irq(&np->dev->xmit_lock);
netpoll_poll(np); netpoll_poll(np);
goto repeat; goto repeat;
...@@ -205,7 +205,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) ...@@ -205,7 +205,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
status = np->dev->hard_start_xmit(skb, np->dev); status = np->dev->hard_start_xmit(skb, np->dev);
np->dev->xmit_lock_owner = -1; np->dev->xmit_lock_owner = -1;
spin_unlock(&np->dev->xmit_lock); spin_unlock_irq(&np->dev->xmit_lock);
/* transmit busy */ /* transmit busy */
if(status) { if(status) {
......
...@@ -2664,12 +2664,11 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) ...@@ -2664,12 +2664,11 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
} }
} }
spin_lock_bh(&odev->xmit_lock); spin_lock_irq(&odev->xmit_lock);
if (!netif_queue_stopped(odev)) { if (!netif_queue_stopped(odev)) {
u64 now; u64 now;
atomic_inc(&(pkt_dev->skb->users)); atomic_inc(&(pkt_dev->skb->users));
retry_now:
ret = odev->hard_start_xmit(pkt_dev->skb, odev); ret = odev->hard_start_xmit(pkt_dev->skb, odev);
if (likely(ret == NETDEV_TX_OK)) { if (likely(ret == NETDEV_TX_OK)) {
pkt_dev->last_ok = 1; pkt_dev->last_ok = 1;
...@@ -2677,10 +2676,6 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) ...@@ -2677,10 +2676,6 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->seq_num++; pkt_dev->seq_num++;
pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
} else if (ret == NETDEV_TX_LOCKED
&& (odev->features & NETIF_F_LLTX)) {
cpu_relax();
goto retry_now;
} else { /* Retry it next time */ } else { /* Retry it next time */
atomic_dec(&(pkt_dev->skb->users)); atomic_dec(&(pkt_dev->skb->users));
...@@ -2716,7 +2711,7 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) ...@@ -2716,7 +2711,7 @@ __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->next_tx_ns = 0; pkt_dev->next_tx_ns = 0;
} }
spin_unlock_bh(&odev->xmit_lock); spin_unlock_irq(&odev->xmit_lock);
/* If pkt_dev->count is zero, then run forever */ /* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
......
...@@ -99,17 +99,11 @@ int qdisc_restart(struct net_device *dev) ...@@ -99,17 +99,11 @@ int qdisc_restart(struct net_device *dev)
if ((skb = q->dequeue(q)) != NULL) { if ((skb = q->dequeue(q)) != NULL) {
unsigned nolock = (dev->features & NETIF_F_LLTX); unsigned nolock = (dev->features & NETIF_F_LLTX);
/* /*
* When the driver has LLTX set it does its own locking * When the driver has LLTX set it does not require any
* in start_xmit. No need to add additional overhead by * locking in start_xmit.
* locking again. These checks are worth it because
* even uncongested locks can be quite expensive.
* The driver can do trylock like here too, in case
* of lock congestion it should return -1 and the packet
* will be requeued.
*/ */
if (!nolock) { if (!nolock) {
if (!spin_trylock(&dev->xmit_lock)) { if (!spin_trylock_irq(&dev->xmit_lock)) {
collision:
/* So, someone grabbed the driver. */ /* So, someone grabbed the driver. */
/* It may be transient configuration error, /* It may be transient configuration error,
...@@ -143,22 +137,18 @@ int qdisc_restart(struct net_device *dev) ...@@ -143,22 +137,18 @@ int qdisc_restart(struct net_device *dev)
if (ret == NETDEV_TX_OK) { if (ret == NETDEV_TX_OK) {
if (!nolock) { if (!nolock) {
dev->xmit_lock_owner = -1; dev->xmit_lock_owner = -1;
spin_unlock(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
} }
spin_lock(&dev->queue_lock); spin_lock(&dev->queue_lock);
return -1; return -1;
} }
if (ret == NETDEV_TX_LOCKED && nolock) {
spin_lock(&dev->queue_lock);
goto collision;
}
} }
/* NETDEV_TX_BUSY - we need to requeue */ /* NETDEV_TX_BUSY - we need to requeue */
/* Release the driver */ /* Release the driver */
if (!nolock) { if (!nolock) {
dev->xmit_lock_owner = -1; dev->xmit_lock_owner = -1;
spin_unlock(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
} }
spin_lock(&dev->queue_lock); spin_lock(&dev->queue_lock);
q = dev->qdisc; q = dev->qdisc;
...@@ -186,7 +176,7 @@ static void dev_watchdog(unsigned long arg) ...@@ -186,7 +176,7 @@ static void dev_watchdog(unsigned long arg)
{ {
struct net_device *dev = (struct net_device *)arg; struct net_device *dev = (struct net_device *)arg;
spin_lock(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
if (dev->qdisc != &noop_qdisc) { if (dev->qdisc != &noop_qdisc) {
if (netif_device_present(dev) && if (netif_device_present(dev) &&
netif_running(dev) && netif_running(dev) &&
...@@ -200,7 +190,7 @@ static void dev_watchdog(unsigned long arg) ...@@ -200,7 +190,7 @@ static void dev_watchdog(unsigned long arg)
dev_hold(dev); dev_hold(dev);
} }
} }
spin_unlock(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
dev_put(dev); dev_put(dev);
} }
...@@ -224,17 +214,17 @@ void __netdev_watchdog_up(struct net_device *dev) ...@@ -224,17 +214,17 @@ void __netdev_watchdog_up(struct net_device *dev)
static void dev_watchdog_up(struct net_device *dev) static void dev_watchdog_up(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
__netdev_watchdog_up(dev); __netdev_watchdog_up(dev);
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
} }
static void dev_watchdog_down(struct net_device *dev) static void dev_watchdog_down(struct net_device *dev)
{ {
spin_lock_bh(&dev->xmit_lock); spin_lock_irq(&dev->xmit_lock);
if (del_timer(&dev->watchdog_timer)) if (del_timer(&dev->watchdog_timer))
__dev_put(dev); __dev_put(dev);
spin_unlock_bh(&dev->xmit_lock); spin_unlock_irq(&dev->xmit_lock);
} }
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
......
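The qdisc_restart() hunk above also shows the flip side: when NETIF_F_LLTX is set, the core takes no xmit_lock at all before calling ->hard_start_xmit(), so such a device must need no TX locking whatsoever. A minimal sketch of a purely software, queueless device that can keep the flag (illustrative, not from this patch):

	static int softdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* No private TX lock, no DMA ring: hand the packet straight back
		 * to the receive path, the way a software loopback device does. */
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		return NETDEV_TX_OK;
	}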
...@@ -301,12 +301,12 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -301,12 +301,12 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
switch (teql_resolve(skb, skb_res, slave)) { switch (teql_resolve(skb, skb_res, slave)) {
case 0: case 0:
if (spin_trylock(&slave->xmit_lock)) { if (spin_trylock_irq(&slave->xmit_lock)) {
slave->xmit_lock_owner = smp_processor_id(); slave->xmit_lock_owner = smp_processor_id();
if (!netif_queue_stopped(slave) && if (!netif_queue_stopped(slave) &&
slave->hard_start_xmit(skb, slave) == 0) { slave->hard_start_xmit(skb, slave) == 0) {
slave->xmit_lock_owner = -1; slave->xmit_lock_owner = -1;
spin_unlock(&slave->xmit_lock); spin_unlock_irq(&slave->xmit_lock);
master->slaves = NEXT_SLAVE(q); master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev); netif_wake_queue(dev);
master->stats.tx_packets++; master->stats.tx_packets++;
...@@ -314,7 +314,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -314,7 +314,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
return 0; return 0;
} }
slave->xmit_lock_owner = -1; slave->xmit_lock_owner = -1;
spin_unlock(&slave->xmit_lock); spin_unlock_irq(&slave->xmit_lock);
} }
if (netif_queue_stopped(dev)) if (netif_queue_stopped(dev))
busy = 1; busy = 1;
......