Commit 0fe88644 authored by Felix Fietkau

mt76: improve tx status codepath

Use ieee80211_tx_status_ext instead of ieee80211_free_txskb and
ieee80211_tx_status. This makes it compatible with 802.3 encap offload
and improves performance by removing a redundant sta lookup.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 6d51cae2
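
The core of the change, condensed from the diff below: mac80211's ieee80211_tx_status() has to parse the 802.11 header of each completed frame and look the station up by address, which cannot work for 802.3 encap-offloaded frames and costs a hash lookup per packet. With ieee80211_tx_status_ext() the driver hands over the station it already tracks in its wcid table. A minimal sketch, assuming hw, skb and wcid are in scope as in the functions changed below:

	/* old path: mac80211 re-derives the station from the 802.11 header */
	ieee80211_tx_status(hw, skb);

	/* new path: the driver supplies the station directly; .free_list
	 * (optional) collects finished skbs for batched freeing */
	struct ieee80211_tx_status status = {
		.skb = skb,
		.info = IEEE80211_SKB_CB(skb),
		.sta = wcid_to_sta(wcid),	/* wcid looked up once in dev->wcid[] */
	};
	ieee80211_tx_status_ext(hw, &status);
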
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1056,7 +1056,14 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
 				       struct sk_buff_head *list);
 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
 			     struct sk_buff_head *list);
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
+			    struct list_head *free_list);
+static inline void
+mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
+{
+	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
+}
+
 void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
 			  bool flush);
 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -54,11 +54,23 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
 	spin_unlock_bh(&dev->status_list.lock);
 
+	rcu_read_lock();
 	while ((skb = __skb_dequeue(list)) != NULL) {
+		struct ieee80211_tx_status status = {
+			.skb = skb,
+			.info = IEEE80211_SKB_CB(skb),
+		};
+		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+		struct mt76_wcid *wcid;
+
+		wcid = rcu_dereference(dev->wcid[cb->wcid]);
+		if (wcid)
+			status.sta = wcid_to_sta(wcid);
+
 		hw = mt76_tx_status_get_hw(dev, skb);
-		ieee80211_tx_status(hw, skb);
+		ieee80211_tx_status_ext(hw, &status);
 	}
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
@@ -80,7 +92,7 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
 	/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
 	if (flags & MT_TX_CB_TXS_FAILED) {
-		ieee80211_tx_info_clear_status(info);
+		info->status.rates[0].count = 0;
 		info->status.rates[0].idx = -1;
 		info->flags |= IEEE80211_TX_STAT_ACK;
 	}
@@ -173,36 +185,37 @@ mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
 EXPORT_SYMBOL_GPL(mt76_tx_status_check);
 
 static void
-mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
+		      struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct mt76_wcid *wcid;
 	int pending;
 
-	if (info->tx_time_est)
+	if (!wcid || info->tx_time_est)
 		return;
 
-	if (wcid_idx >= ARRAY_SIZE(dev->wcid))
-		return;
-
-	rcu_read_lock();
-
-	wcid = rcu_dereference(dev->wcid[wcid_idx]);
-	if (wcid) {
-		pending = atomic_dec_return(&wcid->non_aql_packets);
-		if (pending < 0)
-			atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
-	}
-
-	rcu_read_unlock();
+	pending = atomic_dec_return(&wcid->non_aql_packets);
+	if (pending < 0)
+		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
 }
 
-void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
+			    struct list_head *free_list)
 {
+	struct ieee80211_tx_status status = {
+		.skb = skb,
+		.free_list = free_list,
+	};
+	struct mt76_wcid *wcid = NULL;
 	struct ieee80211_hw *hw;
 	struct sk_buff_head list;
 
-	mt76_tx_check_non_aql(dev, wcid_idx, skb);
+	rcu_read_lock();
+
+	if (wcid_idx < ARRAY_SIZE(dev->wcid))
+		wcid = rcu_dereference(dev->wcid[wcid_idx]);
+
+	mt76_tx_check_non_aql(dev, wcid, skb);
 
 #ifdef CONFIG_NL80211_TESTMODE
 	if (mt76_is_testmode_skb(dev, skb, &hw)) {
@@ -214,21 +227,25 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk
 			wake_up(&dev->tx_wait);
 
 		dev_kfree_skb_any(skb);
-		return;
+		goto out;
 	}
 #endif
 
 	if (!skb->prev) {
 		hw = mt76_tx_status_get_hw(dev, skb);
-		ieee80211_free_txskb(hw, skb);
-		return;
+		status.sta = wcid_to_sta(wcid);
+		ieee80211_tx_status_ext(hw, &status);
+		goto out;
 	}
 
 	mt76_tx_status_lock(dev, &list);
 	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
 	mt76_tx_status_unlock(dev, &list);
+
+out:
+	rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);
 
 static int
 __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
@@ -244,11 +261,15 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
 	non_aql = !info->tx_time_est;
 	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
-	if (idx < 0 || !sta || !non_aql)
+	if (idx < 0 || !sta)
 		return idx;
 
 	wcid = (struct mt76_wcid *)sta->drv_priv;
 	q->entry[idx].wcid = wcid->idx;
+
+	if (!non_aql)
+		return idx;
+
 	pending = atomic_inc_return(&wcid->non_aql_packets);
 	if (stop && pending >= MT_MAX_NON_AQL_PKT)
 		*stop = true;
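
A note on the new free_list argument: when it is non-NULL, mac80211 links each completed skb onto the caller's list instead of freeing it immediately, so a driver's tx-free path can release a whole batch in one pass. A hypothetical caller sketch, not part of this commit; the helper name example_tx_free() and the "done" queue are illustrative only:

	static void example_tx_free(struct mt76_dev *dev, struct sk_buff_head *done)
	{
		LIST_HEAD(free_list);
		struct sk_buff *skb, *tmp;

		/* report each completed frame; finished skbs are queued
		 * on free_list rather than freed one by one */
		while ((skb = __skb_dequeue(done)) != NULL)
			__mt76_tx_complete_skb(dev, mt76_tx_skb_cb(skb)->wcid,
					       skb, &free_list);

		/* release the whole batch at once */
		list_for_each_entry_safe(skb, tmp, &free_list, list) {
			skb_list_del_init(skb);
			napi_consume_skb(skb, 1);
		}
	}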