Commit 859c3ca1 authored by Sujith Manoharan, committed by John W. Linville

ath9k_htc: Add a timer to cleanup WMI events

Occasionally, a WMI event would arrive ahead of the TX
URB completion handler. Discarding these events would exhaust
the available TX slots, so handle them by running a timer
that cleans up such events. Also, time out packets for which TX
completion events have not arrived.
Signed-off-by: Sujith Manoharan <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent c4d04186
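
For readers unfamiliar with the pattern the patch introduces, here is a minimal, self-contained sketch of the same idea in isolation: a self-rearming kernel timer that periodically walks a spinlock-protected list of pending entries, drops entries that have been retried too often, and is stopped with del_timer_sync() before the list is drained. It uses the older setup_timer()/unsigned long callback API that this tree uses; the demo_* names are illustrative and are not part of ath9k_htc.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#define DEMO_CLEANUP_INTERVAL	50	/* ms, cf. ATH9K_HTC_TX_CLEANUP_INTERVAL */
#define DEMO_TIMEOUT_COUNT	20	/* passes before an entry is dropped */

struct demo_event {
	int count;
	struct list_head list;
};

struct demo_ctx {
	struct timer_list cleanup_timer;
	struct list_head pending;
	spinlock_t lock;
};

/* Runs in softirq context; retires stale entries and re-arms itself. */
static void demo_cleanup_timer(unsigned long data)
{
	struct demo_ctx *ctx = (struct demo_ctx *)data;
	struct demo_event *ev, *tmp;

	spin_lock(&ctx->lock);
	list_for_each_entry_safe(ev, tmp, &ctx->pending, list) {
		/* The driver would first try to match the event to a packet. */
		if (++ev->count >= DEMO_TIMEOUT_COUNT) {
			list_del(&ev->list);
			kfree(ev);
		}
	}
	spin_unlock(&ctx->lock);

	mod_timer(&ctx->cleanup_timer,
		  jiffies + msecs_to_jiffies(DEMO_CLEANUP_INTERVAL));
}

/* Called when an event arrives before its packet; the timer frees it later. */
static int demo_add_event(struct demo_ctx *ctx)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;

	spin_lock_bh(&ctx->lock);
	list_add_tail(&ev->list, &ctx->pending);
	spin_unlock_bh(&ctx->lock);
	return 0;
}

static void demo_init(struct demo_ctx *ctx)
{
	INIT_LIST_HEAD(&ctx->pending);
	spin_lock_init(&ctx->lock);
	setup_timer(&ctx->cleanup_timer, demo_cleanup_timer, (unsigned long)ctx);
	mod_timer(&ctx->cleanup_timer,
		  jiffies + msecs_to_jiffies(DEMO_CLEANUP_INTERVAL));
}

static void demo_teardown(struct demo_ctx *ctx)
{
	struct demo_event *ev, *tmp;

	/* Kill the timer first; it re-arms itself while alive. */
	del_timer_sync(&ctx->cleanup_timer);

	spin_lock_bh(&ctx->lock);
	list_for_each_entry_safe(ev, tmp, &ctx->pending, list) {
		list_del(&ev->list);
		kfree(ev);
	}
	spin_unlock_bh(&ctx->lock);
}

Stopping the timer before draining matters because the callback re-arms itself; the patch follows the same order in ath9k_htc_reset(), ath9k_htc_set_channel() and ath9k_htc_stop().
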
@@ -262,7 +262,10 @@ struct ath9k_htc_rx {
 	spinlock_t rxbuflock;
 };
+#define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */
+#define ATH9K_HTC_TX_TIMEOUT_INTERVAL 2500 /* ms */
 #define ATH9K_HTC_TX_RESERVE 10
+#define ATH9K_HTC_TX_TIMEOUT_COUNT 20
 #define ATH9K_HTC_TX_THRESHOLD (MAX_TX_BUF_NUM - ATH9K_HTC_TX_RESERVE)
 #define ATH9K_HTC_OP_TX_QUEUES_STOP BIT(0)
@@ -279,6 +282,7 @@ struct ath9k_htc_tx {
 	struct sk_buff_head data_vo_queue;
 	struct sk_buff_head tx_failed;
 	DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
+	struct timer_list cleanup_timer;
 	spinlock_t tx_lock;
 };
@@ -287,6 +291,7 @@ struct ath9k_htc_tx_ctl {
 	u8 epid;
 	u8 txok;
 	u8 sta_idx;
+	unsigned long timestamp;
 };
 static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
@@ -557,6 +562,7 @@ void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv);
 void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
 void ath9k_htc_tx_failed(struct ath9k_htc_priv *priv);
 void ath9k_tx_failed_tasklet(unsigned long data);
+void ath9k_htc_tx_cleanup_timer(unsigned long data);
 int ath9k_rx_init(struct ath9k_htc_priv *priv);
 void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
...
@@ -671,7 +671,6 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
 	common->priv = priv;
 	common->debug_mask = ath9k_debug;
-	spin_lock_init(&priv->wmi->wmi_lock);
 	spin_lock_init(&priv->beacon_lock);
 	spin_lock_init(&priv->tx.tx_lock);
 	mutex_init(&priv->mutex);
@@ -683,6 +682,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
 	INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
 	INIT_WORK(&priv->ps_work, ath9k_ps_work);
 	INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
+	setup_timer(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer,
+		    (unsigned long)priv);
 	/*
 	 * Cache line size is used to size and align various
...
@@ -194,6 +194,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
 	ath9k_htc_stop_ani(priv);
 	ieee80211_stop_queues(priv->hw);
+	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
@@ -225,6 +226,9 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
 	ath9k_htc_vif_reconfig(priv);
 	ieee80211_wake_queues(priv->hw);
+	mod_timer(&priv->tx.cleanup_timer,
+		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
 	ath9k_htc_ps_restore(priv);
 	mutex_unlock(&priv->mutex);
 }
@@ -251,6 +255,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 	ath9k_htc_ps_wakeup(priv);
+	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
@@ -301,6 +306,9 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 	    !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
 		ath9k_htc_vif_reconfig(priv);
+	mod_timer(&priv->tx.cleanup_timer,
+		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
 err:
 	ath9k_htc_ps_restore(priv);
 	return ret;
@@ -937,6 +945,9 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
 	ieee80211_wake_queues(hw);
+	mod_timer(&priv->tx.cleanup_timer,
+		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
 	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
 		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
 					   AR_STOMP_LOW_WLAN_WGHT);
@@ -972,6 +983,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
 	tasklet_kill(&priv->rx_tasklet);
+	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);
 	ath9k_wmi_event_drain(priv);
...
@@ -495,6 +495,8 @@ static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv,
 void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
 {
+	struct ath9k_htc_tx_event *event, *tmp;
 	spin_lock_bh(&priv->tx.tx_lock);
 	priv->tx.flags |= ATH9K_HTC_OP_TX_DRAIN;
 	spin_unlock_bh(&priv->tx.tx_lock);
@@ -515,6 +517,16 @@ void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
 	ath9k_htc_tx_drainq(priv, &priv->tx.data_vo_queue);
 	ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
+	/*
+	 * The TX cleanup timer has already been killed.
+	 */
+	spin_lock_bh(&priv->wmi->event_lock);
+	list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
+		list_del(&event->list);
+		kfree(event);
+	}
+	spin_unlock_bh(&priv->wmi->event_lock);
 	spin_lock_bh(&priv->tx.tx_lock);
 	priv->tx.flags &= ~ATH9K_HTC_OP_TX_DRAIN;
 	spin_unlock_bh(&priv->tx.tx_lock);
@@ -595,6 +607,7 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
 	struct wmi_event_txstatus *txs = (struct wmi_event_txstatus *)wmi_event;
 	struct __wmi_event_txstatus *__txs;
 	struct sk_buff *skb;
+	struct ath9k_htc_tx_event *tx_pend;
 	int i;
 	for (i = 0; i < txs->cnt; i++) {
@@ -603,8 +616,26 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
 		__txs = &txs->txstatus[i];
 		skb = ath9k_htc_tx_get_packet(priv, __txs);
-		if (!skb)
+		if (!skb) {
+			/*
+			 * Store this event, so that the TX cleanup
+			 * routine can check later for the needed packet.
+			 */
+			tx_pend = kzalloc(sizeof(struct ath9k_htc_tx_event),
+					  GFP_ATOMIC);
+			if (!tx_pend)
+				continue;
+			memcpy(&tx_pend->txs, __txs,
+			       sizeof(struct __wmi_event_txstatus));
+			spin_lock(&priv->wmi->event_lock);
+			list_add_tail(&tx_pend->list,
+				      &priv->wmi->pending_tx_events);
+			spin_unlock(&priv->wmi->event_lock);
 			continue;
+		}
 		ath9k_htc_tx_process(priv, skb, __txs);
 	}
@@ -622,6 +653,7 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
 	tx_ctl = HTC_SKB_CB(skb);
 	tx_ctl->txok = txok;
+	tx_ctl->timestamp = jiffies;
 	if (!txok) {
 		skb_queue_tail(&priv->tx.tx_failed, skb);
@@ -638,6 +670,99 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
 	skb_queue_tail(epid_queue, skb);
 }
+static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+{
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_tx_ctl *tx_ctl;
+	tx_ctl = HTC_SKB_CB(skb);
+	if (time_after(jiffies,
+		       tx_ctl->timestamp +
+		       msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
+		ath_dbg(common, ATH_DBG_XMIT,
+			"Dropping a packet due to TX timeout\n");
+		return true;
+	}
+	return false;
+}
+static void ath9k_htc_tx_cleanup_queue(struct ath9k_htc_priv *priv,
+				       struct sk_buff_head *epid_queue)
+{
+	bool process = false;
+	unsigned long flags;
+	struct sk_buff *skb, *tmp;
+	struct sk_buff_head queue;
+	skb_queue_head_init(&queue);
+	spin_lock_irqsave(&epid_queue->lock, flags);
+	skb_queue_walk_safe(epid_queue, skb, tmp) {
+		if (check_packet(priv, skb)) {
+			__skb_unlink(skb, epid_queue);
+			__skb_queue_tail(&queue, skb);
+			process = true;
+		}
+	}
+	spin_unlock_irqrestore(&epid_queue->lock, flags);
+	if (process) {
+		skb_queue_walk_safe(&queue, skb, tmp) {
+			__skb_unlink(skb, &queue);
+			ath9k_htc_tx_process(priv, skb, NULL);
+		}
+	}
+}
+void ath9k_htc_tx_cleanup_timer(unsigned long data)
+{
+	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) data;
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_tx_event *event, *tmp;
+	struct sk_buff *skb;
+	spin_lock(&priv->wmi->event_lock);
+	list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
+		skb = ath9k_htc_tx_get_packet(priv, &event->txs);
+		if (skb) {
+			ath_dbg(common, ATH_DBG_XMIT,
+				"Found packet for cookie: %d, epid: %d\n",
+				event->txs.cookie,
+				MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
+			ath9k_htc_tx_process(priv, skb, &event->txs);
+			list_del(&event->list);
+			kfree(event);
+			continue;
+		}
+		if (++event->count >= ATH9K_HTC_TX_TIMEOUT_COUNT) {
+			list_del(&event->list);
+			kfree(event);
+		}
+	}
+	spin_unlock(&priv->wmi->event_lock);
+	/*
+	 * Check if status-pending packets have to be cleaned up.
+	 */
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.mgmt_ep_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.cab_ep_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_be_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_bk_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vi_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vo_queue);
+	/* Wake TX queues if needed */
+	ath9k_htc_check_wake_queues(priv);
+	mod_timer(&priv->tx.cleanup_timer,
+		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+}
 int ath9k_tx_init(struct ath9k_htc_priv *priv)
 {
 	skb_queue_head_init(&priv->tx.mgmt_ep_queue);
...
@@ -91,9 +91,12 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
 	wmi->drv_priv = priv;
 	wmi->stopped = false;
 	skb_queue_head_init(&wmi->wmi_event_queue);
+	spin_lock_init(&wmi->wmi_lock);
+	spin_lock_init(&wmi->event_lock);
 	mutex_init(&wmi->op_mutex);
 	mutex_init(&wmi->multi_write_mutex);
 	init_completion(&wmi->cmd_wait);
+	INIT_LIST_HEAD(&wmi->pending_tx_events);
 	tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
 		     (unsigned long)wmi);
...
@@ -130,6 +130,12 @@ struct register_write {
 	__be32 val;
 };
+struct ath9k_htc_tx_event {
+	int count;
+	struct __wmi_event_txstatus txs;
+	struct list_head list;
+};
 struct wmi {
 	struct ath9k_htc_priv *drv_priv;
 	struct htc_target *htc;
@@ -144,6 +150,9 @@ struct wmi {
 	u32 cmd_rsp_len;
 	bool stopped;
+	struct list_head pending_tx_events;
+	spinlock_t event_lock;
 	spinlock_t wmi_lock;
 	atomic_t mwrite_cnt;
...
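
Taken together, the new constants describe the cleanup behaviour: the timer fires every ATH9K_HTC_TX_CLEANUP_INTERVAL (50 ms); a stored WMI TX event whose packet has still not appeared is retried on each pass and freed after ATH9K_HTC_TX_TIMEOUT_COUNT (20) passes, i.e. after roughly 20 × 50 ms = 1 second; and a queued packet whose completion event never arrives is timed out once it has waited longer than ATH9K_HTC_TX_TIMEOUT_INTERVAL (2500 ms).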