Commit a522550a authored by Ido Yariv, committed by John W. Linville

wl1271: Fix TX starvation

While wl1271_irq_work handles RX directly (by calling wl1271_rx), a different
work is scheduled for transmitting packets. The IRQ work might handle more than
one interrupt during a single call, including multiple TX completion
interrupts. This might starve TX, since no packets are transmitted until all
interrupts are handled.

Fix this by calling the TX work function directly, instead of deferring
it.
Signed-off-by: Ido Yariv <ido@wizery.com>
Reviewed-by: Juuso Oikarinen <juuso.oikarinen@nokia.com>
Signed-off-by: Luciano Coelho <luciano.coelho@nokia.com>
parent 6c6e669e
...@@ -351,6 +351,7 @@ struct wl1271 { ...@@ -351,6 +351,7 @@ struct wl1271 {
#define WL1271_FLAG_IDLE_REQUESTED (11) #define WL1271_FLAG_IDLE_REQUESTED (11)
#define WL1271_FLAG_PSPOLL_FAILURE (12) #define WL1271_FLAG_PSPOLL_FAILURE (12)
#define WL1271_FLAG_STA_STATE_SENT (13) #define WL1271_FLAG_STA_STATE_SENT (13)
#define WL1271_FLAG_FW_TX_BUSY (14)
unsigned long flags; unsigned long flags;
struct wl1271_partition_set part; struct wl1271_partition_set part;
......
...@@ -481,9 +481,9 @@ static void wl1271_fw_status(struct wl1271 *wl, ...@@ -481,9 +481,9 @@ static void wl1271_fw_status(struct wl1271 *wl,
total += cnt; total += cnt;
} }
/* if more blocks are available now, schedule some tx work */ /* if more blocks are available now, tx work can be scheduled */
if (total && !skb_queue_empty(&wl->tx_queue)) if (total)
ieee80211_queue_work(wl->hw, &wl->tx_work); clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* update the host-chipset time offset */ /* update the host-chipset time offset */
getnstimeofday(&ts); getnstimeofday(&ts);
...@@ -537,6 +537,16 @@ static void wl1271_irq_work(struct work_struct *work) ...@@ -537,6 +537,16 @@ static void wl1271_irq_work(struct work_struct *work)
(wl->tx_results_count & 0xff)) (wl->tx_results_count & 0xff))
wl1271_tx_complete(wl); wl1271_tx_complete(wl);
/* Check if any tx blocks were freed */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
!skb_queue_empty(&wl->tx_queue)) {
/*
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
wl1271_tx_work_locked(wl);
}
wl1271_rx(wl, wl->fw_status); wl1271_rx(wl, wl->fw_status);
} }
...@@ -867,7 +877,8 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ...@@ -867,7 +877,8 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* before that, the tx_work will not be initialized! * before that, the tx_work will not be initialized!
*/ */
ieee80211_queue_work(wl->hw, &wl->tx_work); if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
/* /*
* The workqueue is slow to process the tx_queue and we need stop * The workqueue is slow to process the tx_queue and we need stop
......
...@@ -204,9 +204,8 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set) ...@@ -204,9 +204,8 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
return enabled_rates; return enabled_rates;
} }
void wl1271_tx_work(struct work_struct *work) void wl1271_tx_work_locked(struct wl1271 *wl)
{ {
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
struct sk_buff *skb; struct sk_buff *skb;
bool woken_up = false; bool woken_up = false;
u32 sta_rates = 0; u32 sta_rates = 0;
...@@ -223,8 +222,6 @@ void wl1271_tx_work(struct work_struct *work) ...@@ -223,8 +222,6 @@ void wl1271_tx_work(struct work_struct *work)
spin_unlock_irqrestore(&wl->wl_lock, flags); spin_unlock_irqrestore(&wl->wl_lock, flags);
} }
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) if (unlikely(wl->state == WL1271_STATE_OFF))
goto out; goto out;
...@@ -260,6 +257,8 @@ void wl1271_tx_work(struct work_struct *work) ...@@ -260,6 +257,8 @@ void wl1271_tx_work(struct work_struct *work)
* Queue back last skb, and stop aggregating. * Queue back last skb, and stop aggregating.
*/ */
skb_queue_head(&wl->tx_queue, skb); skb_queue_head(&wl->tx_queue, skb);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack; goto out_ack;
} else if (ret < 0) { } else if (ret < 0) {
dev_kfree_skb(skb); dev_kfree_skb(skb);
...@@ -283,7 +282,14 @@ void wl1271_tx_work(struct work_struct *work) ...@@ -283,7 +282,14 @@ void wl1271_tx_work(struct work_struct *work)
out: out:
if (woken_up) if (woken_up)
wl1271_ps_elp_sleep(wl); wl1271_ps_elp_sleep(wl);
}
void wl1271_tx_work(struct work_struct *work)
{
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
mutex_lock(&wl->mutex);
wl1271_tx_work_locked(wl);
mutex_unlock(&wl->mutex); mutex_unlock(&wl->mutex);
} }
......
...@@ -140,6 +140,7 @@ static inline int wl1271_tx_get_queue(int queue) ...@@ -140,6 +140,7 @@ static inline int wl1271_tx_get_queue(int queue)
} }
void wl1271_tx_work(struct work_struct *work); void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl); void wl1271_tx_complete(struct wl1271 *wl);
void wl1271_tx_reset(struct wl1271 *wl); void wl1271_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl); void wl1271_tx_flush(struct wl1271 *wl);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment