Commit 8ff499e4 authored by Dongdong Deng, committed by David S. Miller

smc91x: let smc91x work well under netpoll

Netpoll requires that interrupts remain disabled in its callbacks.

Use the *_irq_save()/*_irq_restore() variants instead of *_irq_disable()/*_irq_enable()
in the smc91x paths reachable from netpoll, so that interrupts are not re-enabled
when they were already disabled, and kgdboe/netconsole work properly over smc91x.
Signed-off-by: Dongdong Deng <dongdong.deng@windriver.com>
Acked-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d2f3ad4c
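For context, the sketch below (not part of the patch; the lock and helper names are made up for illustration) shows the behaviour the commit relies on: spin_unlock_irq() unconditionally re-enables local interrupts, while spin_lock_irqsave()/spin_unlock_irqrestore() record and restore the caller's interrupt state, so a caller such as netpoll/kgdboe that enters with interrupts disabled leaves with them still disabled.

/* Illustrative sketch only -- not from the patch.  "example_lock" and the
 * two helpers are hypothetical; only the spinlock primitives are real. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void touch_hw_save_restore(void)
{
	unsigned long flags;

	/* Records the current IRQ state and puts exactly that state back,
	 * so a caller that arrived with IRQs off still has them off. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... access device registers ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

static void touch_hw_disable_enable(void)
{
	/* Unconditionally re-enables IRQs on unlock -- wrong when the
	 * caller (e.g. netpoll) needed them to stay disabled. */
	spin_lock_irq(&example_lock);
	/* ... access device registers ... */
	spin_unlock_irq(&example_lock);
}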
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_enable_flags;					\
+	spin_lock_irqsave(&lp->lock, smc_enable_flags);			\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask |= (x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);		\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {					\
 	unsigned char mask;						\
-	spin_lock_irq(&lp->lock);					\
+	unsigned long smc_disable_flags;				\
+	spin_lock_irqsave(&lp->lock, smc_disable_flags);		\
 	mask = SMC_GET_INT_MASK(lp);					\
 	mask &= ~(x);							\
 	SMC_SET_INT_MASK(lp, mask);					\
-	spin_unlock_irq(&lp->lock);					\
+	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);		\
 } while (0)
 
 /*
@@ -520,21 +522,21 @@ static inline void smc_rcv(struct net_device *dev)
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)					\
+#define smc_special_trylock(lock, flags)				\
 ({									\
 	int __ret;							\
-	local_irq_disable();						\
+	local_irq_save(flags);						\
 	__ret = spin_trylock(lock);					\
 	if (!__ret)							\
-		local_irq_enable();					\
+		local_irq_restore(flags);				\
 	__ret;								\
 })
-#define smc_special_lock(lock)		spin_lock_irq(lock)
-#define smc_special_unlock(lock)	spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)	(1)
-#define smc_special_lock(lock)		do { } while (0)
-#define smc_special_unlock(lock)	do { } while (0)
+#define smc_special_trylock(lock, flags)	(1)
+#define smc_special_lock(lock, flags)	do { } while (0)
+#define smc_special_unlock(lock, flags)	do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
 	struct sk_buff *skb;
 	unsigned int packet_no, len;
 	unsigned char *buf;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
-	if (!smc_special_trylock(&lp->lock)) {
+	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
 		tasklet_schedule(&lp->tx_task);
 		return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	skb = lp->pending_tx_skb;
 	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		return;
 	}
 	lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 		printk("%s: Memory allocation failed.\n", dev->name);
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		goto done;
 	}
 
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	/* queue the packet for TX */
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct smc_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	smc_special_lock(&lp->lock);
+	smc_special_lock(&lp->lock, flags);
 
 	/* now, try to allocate the memory */
 	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	} while (--poll_count);
 
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	lp->pending_tx_skb = skb;
 	if (!poll_count) {
...