Commit 05b64b36 authored by Michael Buesch, committed by David S. Miller

[B43]: Rewrite pwork locking policy.

Implement much simpler and more lightweight locking for
the periodic work.
This also removes the last big busywait loop and replaces it
with a sleeping loop.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 42bb4cd5
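
For context, the "busywait loop replaced by a sleeping loop" part of this change boils down to the pattern sketched below. This is a minimal sketch with hypothetical names (poll_status_busywait(), poll_status_sleeping(), status_reg, ready_bit), not the driver's actual code: udelay() spins the CPU and is safe with IRQs disabled, while msleep() yields the CPU but requires process context, which is why the patch also adds might_sleep() and irqs_disabled() checks.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static int poll_status_busywait(void __iomem *status_reg, u32 ready_bit)
{
        int i;

        /* Old style: spin for up to 10000 microseconds, burning CPU the whole time. */
        for (i = 10000; i; i--) {
                if (readl(status_reg) & ready_bit)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static int poll_status_sleeping(void __iomem *status_reg, u32 ready_bit)
{
        int i;

        might_sleep();  /* only legal in process context, with IRQs enabled */

        /* New style: poll for up to roughly 40 ms, sleeping between reads so
         * the scheduler can run other work in the meantime. */
        for (i = 40; i; i--) {
                if (readl(status_reg) & ready_bit)
                        return 0;
                msleep(1);
        }
        return -ETIMEDOUT;
}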
@@ -1976,6 +1976,7 @@ void b43_mac_enable(struct b43_wldev *dev)
 {
         dev->mac_suspended--;
         B43_WARN_ON(dev->mac_suspended < 0);
+        B43_WARN_ON(irqs_disabled());
         if (dev->mac_suspended == 0) {
                 b43_write32(dev, B43_MMIO_MACCTL,
                             b43_read32(dev, B43_MMIO_MACCTL)
@@ -1986,6 +1987,11 @@ void b43_mac_enable(struct b43_wldev *dev)
                 b43_read32(dev, B43_MMIO_MACCTL);
                 b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
                 b43_power_saving_ctl_bits(dev, 0);
+
+                /* Re-enable IRQs. */
+                spin_lock_irq(&dev->wl->irq_lock);
+                b43_interrupt_enable(dev, dev->irq_savedstate);
+                spin_unlock_irq(&dev->wl->irq_lock);
         }
 }
@@ -1995,23 +2001,34 @@ void b43_mac_suspend(struct b43_wldev *dev)
         int i;
         u32 tmp;
 
+        might_sleep();
+        B43_WARN_ON(irqs_disabled());
         B43_WARN_ON(dev->mac_suspended < 0);
         if (dev->mac_suspended == 0) {
+                /* Mask IRQs before suspending MAC. Otherwise
+                 * the MAC stays busy and won't suspend. */
+                spin_lock_irq(&dev->wl->irq_lock);
+                tmp = b43_interrupt_disable(dev, B43_IRQ_ALL);
+                spin_unlock_irq(&dev->wl->irq_lock);
+                b43_synchronize_irq(dev);
+                dev->irq_savedstate = tmp;
+
                 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
                 b43_write32(dev, B43_MMIO_MACCTL,
                             b43_read32(dev, B43_MMIO_MACCTL)
                             & ~B43_MACCTL_ENABLED);
                 /* force pci to flush the write */
                 b43_read32(dev, B43_MMIO_MACCTL);
-                for (i = 10000; i; i--) {
+                for (i = 40; i; i--) {
                         tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
                         if (tmp & B43_IRQ_MAC_SUSPENDED)
                                 goto out;
-                        udelay(1);
+                        msleep(1);
                 }
                 b43err(dev->wl, "MAC suspend failed\n");
         }
 out:
         dev->mac_suspended++;
 }
@@ -2349,77 +2366,36 @@ static void do_periodic_work(struct b43_wldev *dev)
                 b43_periodic_every15sec(dev);
 }
 
-/* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
- */
-static int estimate_periodic_work_badness(unsigned int state)
-{
-        int badness = 0;
-
-        if (state % 8 == 0)     /* every 120 sec */
-                badness += 10;
-        if (state % 4 == 0)     /* every 60 sec */
-                badness += 5;
-        if (state % 2 == 0)     /* every 30 sec */
-                badness += 1;
-
-#define BADNESS_LIMIT   4
-        return badness;
-}
+/* Periodic work locking policy:
+ *      The whole periodic work handler is protected by
+ *      wl->mutex. If another lock is needed somewhere in the
+ *      pwork callchain, it's aquired in-place, where it's needed.
+ */
 
 static void b43_periodic_work_handler(struct work_struct *work)
 {
-        struct b43_wldev *dev =
-            container_of(work, struct b43_wldev, periodic_work.work);
-        unsigned long flags, delay;
-        u32 savedirqs = 0;
-        int badness;
+        struct b43_wldev *dev = container_of(work, struct b43_wldev,
+                                             periodic_work.work);
+        struct b43_wl *wl = dev->wl;
+        unsigned long delay;
 
-        mutex_lock(&dev->wl->mutex);
+        mutex_lock(&wl->mutex);
         if (unlikely(b43_status(dev) != B43_STAT_STARTED))
                 goto out;
         if (b43_debug(dev, B43_DBG_PWORK_STOP))
                 goto out_requeue;
 
-        badness = estimate_periodic_work_badness(dev->periodic_state);
-        if (badness > BADNESS_LIMIT) {
-                spin_lock_irqsave(&dev->wl->irq_lock, flags);
-                /* Suspend TX as we don't want to transmit packets while
-                 * we recalibrate the hardware. */
-                b43_tx_suspend(dev);
-                savedirqs = b43_interrupt_disable(dev, B43_IRQ_ALL);
-                /* Periodic work will take a long time, so we want it to
-                 * be preemtible and release the spinlock. */
-                spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
-                b43_synchronize_irq(dev);
-
-                do_periodic_work(dev);
-
-                spin_lock_irqsave(&dev->wl->irq_lock, flags);
-                b43_interrupt_enable(dev, savedirqs);
-                b43_tx_resume(dev);
-                mmiowb();
-                spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
-        } else {
-                /* Take the global driver lock. This will lock any operation. */
-                spin_lock_irqsave(&dev->wl->irq_lock, flags);
-                do_periodic_work(dev);
-                mmiowb();
-                spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
-        }
+        do_periodic_work(dev);
+
         dev->periodic_state++;
 out_requeue:
         if (b43_debug(dev, B43_DBG_PWORK_FAST))
                 delay = msecs_to_jiffies(50);
         else
                 delay = round_jiffies(HZ * 15);
-        queue_delayed_work(dev->wl->hw->workqueue, &dev->periodic_work, delay);
+        queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay);
 out:
-        mutex_unlock(&dev->wl->mutex);
+        mutex_unlock(&wl->mutex);
 }
 
 static void b43_periodic_tasks_setup(struct b43_wldev *dev)
...
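
The locking policy described by the new comment above can be reduced to a generic sketch, using hypothetical names (struct foo_dev, foo_hw_poke(), foo_pwork_handler(), foo_pwork_start()) and standard workqueue primitives. This is not b43 code, just an illustration of "one mutex around the whole periodic handler, spinlocks acquired in-place by the leaf that actually needs them":

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct mutex mutex;             /* serializes the whole periodic work */
        spinlock_t hw_lock;             /* protects register access shared with IRQ context */
        struct delayed_work pwork;
        unsigned int periodic_state;
};

static void foo_hw_poke(struct foo_dev *d)
{
        unsigned long flags;

        /* A lock needed deeper in the callchain is taken right here,
         * not up in the work handler. */
        spin_lock_irqsave(&d->hw_lock, flags);
        /* ... touch hardware registers ... */
        spin_unlock_irqrestore(&d->hw_lock, flags);
}

static void foo_pwork_handler(struct work_struct *work)
{
        struct foo_dev *d = container_of(work, struct foo_dev, pwork.work);

        mutex_lock(&d->mutex);          /* the entire handler runs under the mutex */
        foo_hw_poke(d);
        d->periodic_state++;
        schedule_delayed_work(&d->pwork, round_jiffies(15 * HZ));
        mutex_unlock(&d->mutex);
}

static void foo_pwork_start(struct foo_dev *d)
{
        mutex_init(&d->mutex);
        spin_lock_init(&d->hw_lock);
        INIT_DELAYED_WORK(&d->pwork, foo_pwork_handler);
        schedule_delayed_work(&d->pwork, round_jiffies(15 * HZ));
}

Because the work item always runs in process context, the handler is free to sleep (msleep(), mutex_lock()), which is what allows the driver to drop the irq_lock/spinlock choreography that the old badness heuristic existed to manage.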