Commit c2aa2885 authored by David S. Miller

mac80211: Decrease number of explicit ->tx_queue references.

Accomplish this by using local variables.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 05297949
@@ -570,6 +570,7 @@ static int ieee80211_stop(struct net_device *dev)
 int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 {
         struct ieee80211_local *local = hw_to_local(hw);
+        struct netdev_queue *txq;
         struct sta_info *sta;
         struct ieee80211_sub_if_data *sdata;
         u16 start_seq_num = 0;
@@ -636,7 +637,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 
         /* ensure that TX flow won't interrupt us
          * until the end of the call to requeue function */
-        spin_lock_bh(&local->mdev->tx_queue.lock);
+        txq = &local->mdev->tx_queue;
+        spin_lock_bh(&txq->lock);
 
         /* create a new queue for this aggregation */
         ret = ieee80211_ht_agg_queue_add(local, sta, tid);
@@ -675,7 +677,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 
         /* Will put all the packets in the new SW queue */
         ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
-        spin_unlock_bh(&local->mdev->tx_queue.lock);
+        spin_unlock_bh(&txq->lock);
         spin_unlock_bh(&sta->lock);
 
         /* send an addBA request */
@@ -701,7 +703,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 err_unlock_queue:
         kfree(sta->ampdu_mlme.tid_tx[tid]);
         sta->ampdu_mlme.tid_tx[tid] = NULL;
-        spin_unlock_bh(&local->mdev->tx_queue.lock);
+        spin_unlock_bh(&txq->lock);
         ret = -EBUSY;
 err_unlock_sta:
         spin_unlock_bh(&sta->lock);
@@ -826,6 +828,7 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
 void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 {
         struct ieee80211_local *local = hw_to_local(hw);
+        struct netdev_queue *txq;
         struct sta_info *sta;
         u8 *state;
         int agg_queue;
@@ -875,10 +878,11 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 
         /* avoid ordering issues: we are the only one that can modify
          * the content of the qdiscs */
-        spin_lock_bh(&local->mdev->tx_queue.lock);
+        txq = &local->mdev->tx_queue;
+        spin_lock_bh(&txq->lock);
         /* remove the queue for this aggregation */
         ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
-        spin_unlock_bh(&local->mdev->tx_queue.lock);
+        spin_unlock_bh(&txq->lock);
 
         /* we just requeued the all the frames that were in the removed
          * queue, and since we might miss a softirq we do netif_schedule.
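The whole change is one idiom: take the address of the deeply nested local->mdev->tx_queue once, keep it in a local txq pointer, and do every subsequent lock/unlock through that pointer. As a rough illustration only (not the kernel patch: the struct names below are made up and a pthread mutex stands in for the kernel spinlock), a minimal userspace sketch of the same pointer-caching pattern might look like this:

```c
/*
 * Illustrative sketch only -- hypothetical struct names, pthread mutex
 * instead of a spinlock.  The point is the pointer-caching idiom:
 * compute &local->mdev->tx_queue once, then reuse the local pointer.
 */
#include <pthread.h>
#include <stdio.h>

struct tx_queue {
	pthread_mutex_t lock;
	int qlen;
};

struct net_dev {
	struct tx_queue tx_queue;
};

struct wifi_local {
	struct net_dev *mdev;
};

static void requeue_frames(struct wifi_local *local)
{
	/* Cache the nested queue pointer once in a local variable... */
	struct tx_queue *txq = &local->mdev->tx_queue;

	/* ...then every later reference goes through txq instead of
	 * re-spelling local->mdev->tx_queue each time. */
	pthread_mutex_lock(&txq->lock);
	txq->qlen = 0;			/* stand-in for the real requeue work */
	pthread_mutex_unlock(&txq->lock);
}

int main(void)
{
	struct net_dev dev = { .tx_queue = { .qlen = 3 } };
	struct wifi_local local = { .mdev = &dev };

	pthread_mutex_init(&dev.tx_queue.lock, NULL);
	requeue_frames(&local);
	printf("qlen after requeue: %d\n", dev.tx_queue.qlen);
	pthread_mutex_destroy(&dev.tx_queue.lock);
	return 0;
}
```

Compile with gcc -pthread. Besides being shorter, the cached pointer guarantees that the lock and unlock calls, including the ones on the error paths, name the same queue.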