Commit cc4bf501 authored by Luciano Coelho

Merge branch 'wl12xx-next' into for-linville

parents 41b58f18 55df5afb
@@ -459,23 +459,39 @@ int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id)
 int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
+	unsigned long flags;
 	u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
 	if (link >= WL12XX_MAX_LINKS)
 		return -EBUSY;
 
+	/* these bits are used by op_tx */
+	spin_lock_irqsave(&wl->wl_lock, flags);
 	__set_bit(link, wl->links_map);
 	__set_bit(link, wlvif->links_map);
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
 	*hlid = link;
 	return 0;
 }
 
 void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
+	unsigned long flags;
+
 	if (*hlid == WL12XX_INVALID_LINK_ID)
 		return;
 
+	/* these bits are used by op_tx */
+	spin_lock_irqsave(&wl->wl_lock, flags);
 	__clear_bit(*hlid, wl->links_map);
 	__clear_bit(*hlid, wlvif->links_map);
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+	/*
+	 * At this point op_tx() will not add more packets to the queues. We
+	 * can purge them.
+	 */
+	wl1271_tx_reset_link_queues(wl, *hlid);
+
 	*hlid = WL12XX_INVALID_LINK_ID;
 }
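The non-atomic `__set_bit`/`__clear_bit` are deliberate: the link maps only need to be consistent with readers that sample them under the same `wl_lock`, such as the Tx op the comments mention. A minimal reader-side sketch of that pattern (the helper name is invented for illustration; it is not driver code):

```c
/* Hypothetical reader-side sketch: any path that consults the link maps
 * (e.g. op_tx) takes the same wl_lock, so the cheaper non-atomic
 * __set_bit/__clear_bit are safe in the alloc/free paths above. */
static bool link_is_active(struct wl1271 *wl, u8 hlid)
{
	unsigned long flags;
	bool active;

	spin_lock_irqsave(&wl->wl_lock, flags);
	active = test_bit(hlid, wl->links_map);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return active;
}
```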
@@ -515,7 +531,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
 		goto out_free;
 	}
 
 	cmd->device.hlid = wlvif->dev_hlid;
-	cmd->device.session = wlvif->session_counter;
+	cmd->device.session = wl12xx_get_new_session_id(wl, wlvif);
 
 	wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
		     cmd->role_id, cmd->device.hlid, cmd->device.session);
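`wl12xx_get_new_session_id()` itself is not shown in these hunks. A plausible sketch, assuming it bumps the per-vif `session_counter` and wraps at a firmware-imposed maximum (the `SESSION_COUNTER_MAX` name and the wrap behavior are assumptions here):

```c
/* Sketch of a wrapping session-id allocator; names and limits assumed. */
static int wl12xx_get_new_session_id(struct wl1271 *wl,
				     struct wl12xx_vif *wlvif)
{
	if (wlvif->session_counter >= SESSION_COUNTER_MAX)
		wlvif->session_counter = 0;

	wlvif->session_counter++;

	return wlvif->session_counter;
}
```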
@@ -1802,6 +1818,14 @@ int wl12xx_croc(struct wl1271 *wl, u8 role_id)
 		goto out;
 
 	__clear_bit(role_id, wl->roc_map);
+
+	/*
+	 * Rearm the tx watchdog when removing the last ROC. This prevents
+	 * recoveries due to just finished ROCs - when Tx hasn't yet had
+	 * a chance to get out.
+	 */
+	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES)
+		wl12xx_rearm_tx_watchdog_locked(wl);
 out:
 	return ret;
 }
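`wl12xx_rearm_tx_watchdog_locked()` is declared in the wl12xx.h hunk below under the `/* from main.c */` section, but its body is not part of the hunks shown here. A sketch consistent with how the call sites use it; the exact body is an assumption: it should be a no-op while no Tx blocks are held by the firmware, and otherwise push the watchdog deadline out by the configured timeout.

```c
/* Sketch only; the real body lives in main.c, which is not shown here.
 * Presumably called with wl->mutex held (hence the _locked suffix). */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{
	/* nothing in flight in the FW - the watchdog has nothing to guard */
	if (wl->tx_allocated_blocks == 0)
		return;

	cancel_delayed_work(&wl->tx_watchdog_work);
	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
			msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}
```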
...
@@ -690,6 +690,9 @@ struct conf_tx_settings {
 	 */
 	u8 tmpl_short_retry_limit;
 	u8 tmpl_long_retry_limit;
+
+	/* Time in ms for Tx watchdog timer to expire */
+	u32 tx_watchdog_timeout;
 };
 
 enum {
...
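The new field would be populated from the driver's static default conf in main.c, which is not shown here. A hedged example of what such an entry might look like; the value is an assumption for illustration, not necessarily the shipped default:

```c
/* Hedged example of the default-conf entry (value assumed). */
static struct conf_drv_settings default_conf = {
	.tx = {
		/* recover if the FW sits on Tx blocks for this long */
		.tx_watchdog_timeout = 5 * 1000,	/* ms */
	},
};
```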
@@ -69,8 +69,6 @@ void wl1271_elp_work(struct work_struct *work)
 	mutex_unlock(&wl->mutex);
 }
 
-#define ELP_ENTRY_DELAY 5
-
 /* Routines to toggle sleep mode while in ELP */
 void wl1271_ps_elp_sleep(struct wl1271 *wl)
 {
@@ -90,7 +88,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
 	}
 
 	ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
-				     msecs_to_jiffies(ELP_ENTRY_DELAY));
+				     msecs_to_jiffies(wl->conf.conn.dynamic_ps_timeout));
 }
 
 int wl1271_ps_elp_wakeup(struct wl1271 *wl)
...
@@ -55,6 +55,12 @@ void wl1271_scan_complete_work(struct work_struct *work)
 	vif = wl->scan_vif;
 	wlvif = wl12xx_vif_to_data(vif);
 
+	/*
+	 * Rearm the tx watchdog just before idling scan. This
+	 * prevents just-finished scans from triggering the watchdog
+	 */
+	wl12xx_rearm_tx_watchdog_locked(wl);
+
 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
 	wl->scan.req = NULL;
...
@@ -226,6 +226,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		wl->tx_blocks_available -= total_blocks;
 		wl->tx_allocated_blocks += total_blocks;
 
+		/* If the FW was empty before, arm the Tx watchdog */
+		if (wl->tx_allocated_blocks == total_blocks)
+			wl12xx_rearm_tx_watchdog_locked(wl);
+
 		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		wl->tx_allocated_pkts[ac]++;
@@ -527,6 +531,7 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
 	if (skb) {
 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		spin_lock_irqsave(&wl->wl_lock, flags);
+		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
 		wl->tx_queue_count[q]--;
 		spin_unlock_irqrestore(&wl->wl_lock, flags);
 	}
@@ -571,6 +576,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 	struct wl12xx_vif *wlvif = wl->last_wlvif;
 	struct sk_buff *skb = NULL;
 
+	/* continue from last wlvif (round robin) */
 	if (wlvif) {
 		wl12xx_for_each_wlvif_continue(wl, wlvif) {
 			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
@@ -581,7 +587,11 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 		}
 	}
 
-	/* do another pass */
+	/* dequeue from the system HLID before the restarting wlvif list */
+	if (!skb)
+		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+
+	/* do a new pass over the wlvif list */
 	if (!skb) {
 		wl12xx_for_each_wlvif(wl, wlvif) {
 			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
@@ -589,12 +599,16 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 				wl->last_wlvif = wlvif;
 				break;
 			}
+
+			/*
+			 * No need to continue after last_wlvif. The previous
+			 * pass should have found it.
+			 */
+			if (wlvif == wl->last_wlvif)
+				break;
 		}
 	}
 
-	if (!skb)
-		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
-
 	if (!skb &&
 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
 		int q;
@@ -602,6 +616,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 		skb = wl->dummy_packet;
 		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		spin_lock_irqsave(&wl->wl_lock, flags);
+		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
 		wl->tx_queue_count[q]--;
 		spin_unlock_irqrestore(&wl->wl_lock, flags);
 	}
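Taken together, the hunks above reorder the dequeue priority: the system HLID is now tried after the round-robin continuation but before a fresh pass over the vif list, and the fresh pass stops once it wraps back to `last_wlvif`. A condensed sketch of the resulting order; the two `dequeue_vifs_*` helpers are invented for illustration, standing in for the open-coded `wl12xx_for_each_wlvif_continue()`/`wl12xx_for_each_wlvif()` loops:

```c
/* Hypothetical helpers summarizing the open-coded loops above. */
static struct sk_buff *dequeue_vifs_after(struct wl1271 *wl,
					  struct wl12xx_vif *last);
static struct sk_buff *dequeue_vifs_until(struct wl1271 *wl,
					  struct wl12xx_vif *last);

/* Condensed view of the dequeue priority (sketch, not driver code). */
static struct sk_buff *skb_dequeue_order_sketch(struct wl1271 *wl)
{
	struct sk_buff *skb;

	/* 1. round robin: vifs after the last one we transmitted from */
	skb = dequeue_vifs_after(wl, wl->last_wlvif);

	/* 2. the system HLID, now ahead of a full vif re-scan */
	if (!skb)
		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);

	/* 3. a fresh pass, stopping once last_wlvif is reached again */
	if (!skb)
		skb = dequeue_vifs_until(wl, wl->last_wlvif);

	/* 4. otherwise the dummy packet, if pending (not shown here) */
	return skb;
}
```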
@@ -959,7 +974,6 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 		else
 			wlvif->sta.ba_rx_bitmap = 0;
 
-		wl1271_tx_reset_link_queues(wl, i);
 		wl->links[i].allocated_pkts = 0;
 		wl->links[i].prev_freed_pkts = 0;
 	}
@@ -973,8 +987,14 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
 
-	for (i = 0; i < NUM_TX_QUEUES; i++)
-		wl->tx_queue_count[i] = 0;
+	/* only reset the queues if something bad happened */
+	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
+		for (i = 0; i < WL12XX_MAX_LINKS; i++)
+			wl1271_tx_reset_link_queues(wl, i);
+
+		for (i = 0; i < NUM_TX_QUEUES; i++)
+			wl->tx_queue_count[i] = 0;
+	}
 
 	wl->stopped_queues_map = 0;
@@ -1024,6 +1044,7 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
 void wl1271_tx_flush(struct wl1271 *wl)
 {
 	unsigned long timeout;
+	int i;
 	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
 
 	while (!time_after(jiffies, timeout)) {
@@ -1041,6 +1062,12 @@ void wl1271_tx_flush(struct wl1271 *wl)
 	}
 
 	wl1271_warning("Unable to flush all TX buffers, timed out.");
+
+	/* forcibly flush all Tx buffers on our queues */
+	mutex_lock(&wl->mutex);
+	for (i = 0; i < WL12XX_MAX_LINKS; i++)
+		wl1271_tx_reset_link_queues(wl, i);
+	mutex_unlock(&wl->mutex);
 }
 
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
...
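`wl1271_tx_reset_link_queues()`, called from several hunks above, is pre-existing driver code not shown in this view. A condensed, hedged sketch of what the call sites require of it: drain every per-AC queue of the link, hand the frames back to mac80211 as not acked, and fix up the global queue counters under `wl_lock` (dummy-packet special-casing omitted):

```c
/* Condensed sketch of the pre-existing per-link purge (body assumed). */
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];
	struct sk_buff *skb;
	unsigned long flags;
	int i, total[NUM_TX_QUEUES] = { 0 };

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			/* report the frame back to mac80211, not acked */
			ieee80211_tx_status_ni(wl->hw, skb);
			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= total[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
```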
@@ -227,5 +227,6 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
 
 /* from main.c */
 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
+void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl);
 
 #endif
@@ -260,11 +260,13 @@ enum wl12xx_flags {
 	WL1271_FLAG_SOFT_GEMINI,
 	WL1271_FLAG_RECOVERY_IN_PROGRESS,
 	WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
+	WL1271_FLAG_INTENDED_FW_RECOVERY,
 };
 
 enum wl12xx_vif_flags {
 	WLVIF_FLAG_INITIALIZED,
 	WLVIF_FLAG_STA_ASSOCIATED,
+	WLVIF_FLAG_STA_AUTHORIZED,
 	WLVIF_FLAG_IBSS_JOINED,
 	WLVIF_FLAG_AP_STARTED,
 	WLVIF_FLAG_IN_PS,
@@ -452,8 +454,6 @@ struct wl1271 {
 	bool enable_11a;
 
-	struct list_head list;
-
 	/* Most recently reported noise in dBm */
 	s8 noise;
@@ -495,6 +495,9 @@ struct wl1271 {
 	/* last wlvif we transmitted from */
 	struct wl12xx_vif *last_wlvif;
 
+	/* work to fire when Tx is stuck */
+	struct delayed_work tx_watchdog_work;
 };
 
 struct wl1271_station {
...
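The new `delayed_work` member implies one-time setup and teardown in main.c, which is not shown here. A minimal sketch, assuming the handler is named `wl12xx_tx_watchdog_work` as in the sketch earlier and that the exact call sites are in the alloc and stop paths:

```c
/* At hw allocation time (exact location assumed): */
INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

/* And on the stop/teardown path, so it cannot fire after shutdown: */
cancel_delayed_work_sync(&wl->tx_watchdog_work);
```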