Commit 18667600 authored by Toke Høiland-Jørgensen's avatar Toke Høiland-Jørgensen Committed by Johannes Berg

mac80211: Add TXQ scheduling API

This adds an API to mac80211 to handle scheduling of TXQs. The interface
between driver and mac80211 for TXQ handling is changed by adding two new
functions: ieee80211_next_txq(), which will return the next TXQ to schedule
in the current round-robin rotation, and ieee80211_return_txq(), which the
driver uses to indicate that it has finished scheduling a TXQ (which will
then be put back in the scheduling rotation if it isn't empty).

The driver must call ieee80211_txq_schedule_start() at the start of each
scheduling session, and ieee80211_txq_schedule_end() at the end. The API
then guarantees that the same TXQ is not returned twice in the same
session (so a driver can loop on ieee80211_next_txq() without worrying
about breaking the loop).

Usage of the new API is optional, so drivers can be ported one at a time.
In this patch, the actual scheduling performed by mac80211 is simple
round-robin, but a subsequent commit adds airtime fairness awareness to the
scheduler.
Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
[minor kernel-doc fix, propagate sparse locking checks out]
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent f04d402f
...@@ -108,9 +108,15 @@ ...@@ -108,9 +108,15 @@
* The driver is expected to initialize its private per-queue data for stations * The driver is expected to initialize its private per-queue data for stations
* and interfaces in the .add_interface and .sta_add ops. * and interfaces in the .add_interface and .sta_add ops.
* *
* The driver can't access the queue directly. To dequeue a frame, it calls * The driver can't access the queue directly. To dequeue a frame from a
* ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it * txq, it calls ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a
* calls the .wake_tx_queue driver op. * queue, it calls the .wake_tx_queue driver op.
*
* Drivers can optionally delegate responsibility for scheduling queues to
* mac80211, to take advantage of airtime fairness accounting. In this case, to
* obtain the next queue to pull frames from, the driver calls
* ieee80211_next_txq(). The driver is then expected to return the txq using
* ieee80211_return_txq().
* *
* For AP powersave TIM handling, the driver only needs to indicate if it has * For AP powersave TIM handling, the driver only needs to indicate if it has
* buffered packets in the driver specific data structures by calling * buffered packets in the driver specific data structures by calling
...@@ -6103,7 +6109,8 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); ...@@ -6103,7 +6109,8 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
* ieee80211_tx_dequeue - dequeue a packet from a software tx queue * ieee80211_tx_dequeue - dequeue a packet from a software tx queue
* *
* @hw: pointer as obtained from ieee80211_alloc_hw() * @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface * @txq: pointer obtained from station or virtual interface, or from
* ieee80211_next_txq()
* *
* Returns the skb if successful, %NULL if no frame was available. * Returns the skb if successful, %NULL if no frame was available.
* *
...@@ -6118,6 +6125,54 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); ...@@ -6118,6 +6125,54 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq); struct ieee80211_txq *txq);
/**
* ieee80211_next_txq - get next tx queue to pull packets from
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @ac: AC number to return packets from.
*
* Should only be called between calls to ieee80211_txq_schedule_start()
* and ieee80211_txq_schedule_end().
* Returns the next txq if successful, %NULL if no queue is eligible. If a txq
* is returned, it should be returned with ieee80211_return_txq() after the
* driver has finished scheduling it.
*/
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
/**
* ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface
*
* Should only be called between calls to ieee80211_txq_schedule_start()
* and ieee80211_txq_schedule_end().
*/
void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
/**
* ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @ac: AC number to acquire locks for
*
* Acquire locks needed to schedule TXQs from the given AC. Should be called
* before ieee80211_next_txq() or ieee80211_return_txq().
*/
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
__acquires(txq_lock);
/**
 * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
 *
 * @hw: pointer as obtained from ieee80211_alloc_hw()
 * @ac: AC number to release locks for
 *
 * Release locks previously acquired by ieee80211_txq_schedule_start().
 */
void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
    __releases(txq_lock);
/** /**
* ieee80211_txq_get_depth - get pending frame/byte count of given txq * ieee80211_txq_get_depth - get pending frame/byte count of given txq
* *
......
...@@ -229,7 +229,7 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable) ...@@ -229,7 +229,7 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
clear_bit(IEEE80211_TXQ_STOP, &txqi->flags); clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
local_bh_disable(); local_bh_disable();
rcu_read_lock(); rcu_read_lock();
drv_wake_tx_queue(sta->sdata->local, txqi); schedule_and_wake_txq(sta->sdata->local, txqi);
rcu_read_unlock(); rcu_read_unlock();
local_bh_enable(); local_bh_enable();
} }
......
...@@ -1173,6 +1173,15 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local, ...@@ -1173,6 +1173,15 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
local->ops->wake_tx_queue(&local->hw, &txq->txq); local->ops->wake_tx_queue(&local->hw, &txq->txq);
} }
static inline void schedule_and_wake_txq(struct ieee80211_local *local,
struct txq_info *txqi)
{
spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
ieee80211_return_txq(&local->hw, &txqi->txq);
spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
drv_wake_tx_queue(local, txqi);
}
static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local, static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local,
struct sk_buff *head, struct sk_buff *head,
struct sk_buff *skb) struct sk_buff *skb)
......
...@@ -831,6 +831,8 @@ enum txq_info_flags { ...@@ -831,6 +831,8 @@ enum txq_info_flags {
* a fq_flow which is already owned by a different tin * a fq_flow which is already owned by a different tin
* @def_cvars: codel vars for @def_flow * @def_cvars: codel vars for @def_flow
* @frags: used to keep fragments created after dequeue * @frags: used to keep fragments created after dequeue
* @schedule_order: used with ieee80211_local->active_txqs
* @schedule_round: counter to prevent infinite loops on TXQ scheduling
*/ */
struct txq_info { struct txq_info {
struct fq_tin tin; struct fq_tin tin;
...@@ -838,6 +840,8 @@ struct txq_info { ...@@ -838,6 +840,8 @@ struct txq_info {
struct codel_vars def_cvars; struct codel_vars def_cvars;
struct codel_stats cstats; struct codel_stats cstats;
struct sk_buff_head frags; struct sk_buff_head frags;
struct list_head schedule_order;
u16 schedule_round;
unsigned long flags; unsigned long flags;
/* keep last! */ /* keep last! */
...@@ -1129,6 +1133,11 @@ struct ieee80211_local { ...@@ -1129,6 +1133,11 @@ struct ieee80211_local {
struct codel_vars *cvars; struct codel_vars *cvars;
struct codel_params cparams; struct codel_params cparams;
/* protects active_txqs and txqi->schedule_order */
spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
struct list_head active_txqs[IEEE80211_NUM_ACS];
u16 schedule_round[IEEE80211_NUM_ACS];
const struct ieee80211_ops *ops; const struct ieee80211_ops *ops;
/* /*
......
...@@ -663,6 +663,11 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, ...@@ -663,6 +663,11 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
spin_lock_init(&local->rx_path_lock); spin_lock_init(&local->rx_path_lock);
spin_lock_init(&local->queue_stop_reason_lock); spin_lock_init(&local->queue_stop_reason_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
INIT_LIST_HEAD(&local->active_txqs[i]);
spin_lock_init(&local->active_txq_lock[i]);
}
INIT_LIST_HEAD(&local->chanctx_list); INIT_LIST_HEAD(&local->chanctx_list);
mutex_init(&local->chanctx_mtx); mutex_init(&local->chanctx_mtx);
......
...@@ -1249,7 +1249,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) ...@@ -1249,7 +1249,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
continue; continue;
drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i])); schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
} }
skb_queue_head_init(&pending); skb_queue_head_init(&pending);
......
...@@ -1449,6 +1449,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata, ...@@ -1449,6 +1449,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
codel_vars_init(&txqi->def_cvars); codel_vars_init(&txqi->def_cvars);
codel_stats_init(&txqi->cstats); codel_stats_init(&txqi->cstats);
__skb_queue_head_init(&txqi->frags); __skb_queue_head_init(&txqi->frags);
INIT_LIST_HEAD(&txqi->schedule_order);
txqi->txq.vif = &sdata->vif; txqi->txq.vif = &sdata->vif;
...@@ -1489,6 +1490,9 @@ void ieee80211_txq_purge(struct ieee80211_local *local, ...@@ -1489,6 +1490,9 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
fq_tin_reset(fq, tin, fq_skb_free_func); fq_tin_reset(fq, tin, fq_skb_free_func);
ieee80211_purge_tx_queue(&local->hw, &txqi->frags); ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
list_del_init(&txqi->schedule_order);
spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
} }
void ieee80211_txq_set_params(struct ieee80211_local *local) void ieee80211_txq_set_params(struct ieee80211_local *local)
...@@ -1605,7 +1609,7 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, ...@@ -1605,7 +1609,7 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
ieee80211_txq_enqueue(local, txqi, skb); ieee80211_txq_enqueue(local, txqi, skb);
spin_unlock_bh(&fq->lock); spin_unlock_bh(&fq->lock);
drv_wake_tx_queue(local, txqi); schedule_and_wake_txq(local, txqi);
return true; return true;
} }
...@@ -3630,6 +3634,60 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, ...@@ -3630,6 +3634,60 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
} }
EXPORT_SYMBOL(ieee80211_tx_dequeue); EXPORT_SYMBOL(ieee80211_tx_dequeue);
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *candidate;

	lockdep_assert_held(&local->active_txq_lock[ac]);

	/* Head of the per-AC rotation list, if any queue is pending. */
	candidate = list_first_entry_or_null(&local->active_txqs[ac],
					     struct txq_info,
					     schedule_order);
	if (!candidate)
		return NULL;

	/* Seen already in this scheduling round — end the iteration so
	 * drivers looping on this function cannot spin forever.
	 */
	if (candidate->schedule_round == local->schedule_round[ac])
		return NULL;

	list_del_init(&candidate->schedule_order);
	candidate->schedule_round = local->schedule_round[ac];

	return &candidate->txq;
}
EXPORT_SYMBOL(ieee80211_next_txq);
void ieee80211_return_txq(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
lockdep_assert_held(&local->active_txq_lock[txq->ac]);
if (list_empty(&txqi->schedule_order) &&
(!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets))
list_add_tail(&txqi->schedule_order,
&local->active_txqs[txq->ac]);
}
EXPORT_SYMBOL(ieee80211_return_txq);
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
	__acquires(txq_lock)
{
	struct ieee80211_local *loc = hw_to_local(hw);

	spin_lock_bh(&loc->active_txq_lock[ac]);
	/* New round: lets ieee80211_next_txq() detect already-served txqs. */
	loc->schedule_round[ac]++;
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
	__releases(txq_lock)
{
	struct ieee80211_local *loc = hw_to_local(hw);

	/* Drop the per-AC lock taken in ieee80211_txq_schedule_start(). */
	spin_unlock_bh(&loc->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_txq_schedule_end);
void __ieee80211_subif_start_xmit(struct sk_buff *skb, void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev, struct net_device *dev,
u32 info_flags) u32 info_flags)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment