Commit a6a67db2 authored by Johannes Berg, committed by John W. Linville

mac80211: refcount aggregation queue stop

mac80211 currently maintains the ampdu_lock to
avoid starting a queue due to one aggregation
session while another aggregation session needs
the queue stopped.

We can do better, however, and instead refcount
the queue stops for this particular purpose,
thus removing the need for the lock. This will
help make ampdu_action able to sleep.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 5d22c89b
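
The change boils down to replacing a lock with a per-queue stop refcount: the queue is stopped on the 0 -> 1 transition and woken only when the count drops back to 0, so overlapping aggregation setups/teardowns can no longer wake a queue that another session still needs stopped. A minimal user-space sketch of that pattern (using C11 atomics; hw_stop_queue, hw_wake_queue and agg_stop_refcount are illustrative stand-ins, not mac80211 symbols):

#include <stdatomic.h>
#include <stdio.h>

#define NUM_QUEUES 4

/* One refcount per hardware queue, like local->agg_queue_stop[] in the patch. */
static atomic_int agg_stop_refcount[NUM_QUEUES];

/* Stand-ins for the real driver-facing stop/wake operations. */
static void hw_stop_queue(int queue) { printf("queue %d stopped\n", queue); }
static void hw_wake_queue(int queue) { printf("queue %d woken\n", queue); }

/* Stop the queue only on the 0 -> 1 transition. */
static void stop_queue_agg(int queue)
{
	if (atomic_fetch_add(&agg_stop_refcount[queue], 1) == 0)
		hw_stop_queue(queue);
}

/* Wake the queue only when the last stopper drops its reference. */
static void wake_queue_agg(int queue)
{
	if (atomic_fetch_sub(&agg_stop_refcount[queue], 1) == 1)
		hw_wake_queue(queue);
}

int main(void)
{
	/* Two aggregation sessions overlapping on the same AC/queue. */
	stop_queue_agg(0);	/* session A: queue 0 stopped */
	stop_queue_agg(0);	/* session B: stays stopped, count is now 2 */
	wake_queue_agg(0);	/* session A finishes: queue 0 still stopped */
	wake_queue_agg(0);	/* session B finishes: queue 0 woken */
	return 0;
}

With a plain (non-refcounted) stop/wake, session A finishing would have woken the queue while session B still expected it to be stopped; the refcount removes the need for serializing everything under ampdu_lock.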
@@ -220,6 +220,41 @@ static inline int ieee80211_ac_from_tid(int tid)
 	return ieee802_1d_to_ac[tid & 7];
 }
 
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that in order
+ * to not get into a situation where one of the aggregation
+ * setup or teardown re-enables queues before the other is
+ * ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
+{
+	int queue = ieee80211_ac_from_tid(tid);
+
+	if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
+		ieee80211_stop_queue_by_reason(
+			&local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+{
+	int queue = ieee80211_ac_from_tid(tid);
+
+	if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
+		ieee80211_wake_queue_by_reason(
+			&local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__release(agg_queue);
+}
+
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@@ -263,7 +298,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 	}
 
 	spin_lock_bh(&sta->lock);
-	spin_lock(&local->ampdu_lock);
 
 	/* we have tried too many times, receiver does not want A-MPDU */
 	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
@@ -289,9 +323,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 	 * which would require us to put them to the AC pending
 	 * afterwards which just makes the code more complex.
 	 */
-	ieee80211_stop_queue_by_reason(
-		&local->hw, ieee80211_ac_from_tid(tid),
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	ieee80211_stop_queue_agg(local, tid);
 
 	/* prepare A-MPDU MLME for Tx aggregation */
 	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
@@ -327,11 +359,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
 
 	/* Driver vetoed or OKed, but we can take packets again now */
-	ieee80211_wake_queue_by_reason(
-		&local->hw, ieee80211_ac_from_tid(tid),
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
-	spin_unlock(&local->ampdu_lock);
+	ieee80211_wake_queue_agg(local, tid);
 
 	/* activate the timer for the recipient's addBA response */
 	tid_tx->addba_resp_timer.expires = jiffies + ADDBA_RESP_INTERVAL;
@@ -358,11 +386,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
  err_free:
 	kfree(tid_tx);
  err_wake_queue:
-	ieee80211_wake_queue_by_reason(
-		&local->hw, ieee80211_ac_from_tid(tid),
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	ieee80211_wake_queue_agg(local, tid);
  err_unlock_sta:
-	spin_unlock(&local->ampdu_lock);
 	spin_unlock_bh(&sta->lock);
 	return ret;
 }
@@ -370,19 +395,16 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
 
 /*
  * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish and holding
- * local->ampdu_lock across both calls.
+ * requires a call to ieee80211_agg_splice_finish later
  */
-static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
-					 struct tid_ampdu_tx *tid_tx,
-					 u16 tid)
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+			     struct tid_ampdu_tx *tid_tx, u16 tid)
 {
+	int queue = ieee80211_ac_from_tid(tid);
 	unsigned long flags;
-	u16 queue = ieee80211_ac_from_tid(tid);
 
-	ieee80211_stop_queue_by_reason(
-		&local->hw, queue,
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	ieee80211_stop_queue_agg(local, tid);
 
 	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
 			  " from the pending queue\n", tid))
@@ -397,11 +419,10 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
 	}
 }
 
-static void ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
 {
-	ieee80211_wake_queue_by_reason(
-		&local->hw, ieee80211_ac_from_tid(tid),
-		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	ieee80211_wake_queue_agg(local, tid);
 }
 
 /* caller must hold sta->lock */
@@ -414,7 +435,6 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
 #endif
 
-	spin_lock(&local->ampdu_lock);
 	ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
 	/*
 	 * Now mark as operational. This will be visible
@@ -423,7 +443,6 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 	 */
 	set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
 	ieee80211_agg_splice_finish(local, tid);
-	spin_unlock(&local->ampdu_lock);
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -604,7 +623,6 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	 * more.
 	 */
 
-	spin_lock(&local->ampdu_lock);
 	ieee80211_agg_splice_packets(local, tid_tx, tid);
 
 	/* future packets must not find the tid_tx struct any more */
@@ -613,7 +631,6 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	ieee80211_agg_splice_finish(local, tid);
 
 	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
-	spin_unlock(&local->ampdu_lock);
 
 	spin_unlock_bh(&sta->lock);
 	rcu_read_unlock();
......
@@ -723,13 +723,7 @@ struct ieee80211_local {
 	struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
 	struct tasklet_struct tx_pending_tasklet;
 
-	/*
-	 * This lock is used to prevent concurrent A-MPDU
-	 * session start/stop processing, this thus also
-	 * synchronises the ->ampdu_action() callback to
-	 * drivers and limits it to one at a time.
-	 */
-	spinlock_t ampdu_lock;
+	atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
 
 	/* number of interfaces with corresponding IFF_ flags */
 	atomic_t iff_allmultis, iff_promiscs;
......
@@ -463,8 +463,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 	sta_info_init(local);
 
-	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
 		skb_queue_head_init(&local->pending[i]);
+		atomic_set(&local->agg_queue_stop[i], 0);
+	}
 
 	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
 		     (unsigned long)local);
@@ -475,8 +477,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 	skb_queue_head_init(&local->skb_queue);
 	skb_queue_head_init(&local->skb_queue_unreliable);
 
-	spin_lock_init(&local->ampdu_lock);
-
 	return local_to_hw(local);
 }
 EXPORT_SYMBOL(ieee80211_alloc_hw);
......
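
A side note on the __acquires(agg_queue)/__releases(agg_queue) markers added above: they are sparse context annotations, not executable code. They tell the checker that the stop/splice functions and their wake/finish counterparts must be used as a balanced pair, much like lock and unlock. A rough stand-alone sketch of how such a pair reads; the macro stubs here are only so the snippet compiles outside the kernel, where the real definitions come from the kernel's compiler.h and expand to context-tracking attributes when sparse (__CHECKER__) runs:

/* No-op stubs for building this sketch in user space. */
#define __acquires(x)
#define __releases(x)
#define __acquire(x)	(void)0
#define __release(x)	(void)0

static void __acquires(agg_queue) splice_start(void)
{
	/* ...stop the queue, splice pending frames... */
	__acquire(agg_queue);	/* tell sparse the "agg_queue" context is now held */
}

static void __releases(agg_queue) splice_finish(void)
{
	/* ...wake the queue again... */
	__release(agg_queue);	/* context released; calls must stay balanced */
}

int main(void)
{
	splice_start();
	/* work done while the queue is guaranteed stopped */
	splice_finish();
	return 0;
}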