Commit cac08552 authored by Rajkumar Manoharan, committed by Kalle Valo

ath10k: move mgmt descriptor limit handle under mgmt_tx

Frames that are transmitted via MGMT_TX use reserved descriptor
slots in firmware. This limitation is for the htt_mgmt_tx path itself,
not for mgmt frames per se. In a 16 MBSSID scenario, these reserved slots
are easily exhausted by frequent probe responses. So for 10.4
based solutions, probe responses are limited by a threshold (24).

The management tx path is separate for all except TLV based solutions. Since
the TLV solutions (qca6174 & qca9377) do not support 16 AP interfaces, it is
safe to move the management descriptor limit check under the mgmt_tx
function. Though the CPU improvement is negligible, unlikely or
never-hit conditions in the hot path are avoided on data transmission.
Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 2ce9b25c
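The sketch below is a minimal, user-space model of the two-level descriptor accounting that the commit message describes and that the diff below implements: a generic pending-descriptor check for every HTT frame, plus a separate management-only check applied on the htt_mgmt_tx path. It is illustrative only; the struct, function names, and the limits used in main() are assumptions for the example, not driver definitions.

/*
 * Simplified model of the accounting split introduced by this patch.
 * The real driver code is shown in the diff below.
 */
#include <stdbool.h>
#include <stdio.h>

struct htt_model {
	int num_pending_tx;            /* all outstanding HTT tx descriptors */
	int num_pending_mgmt_tx;       /* outstanding htt_mgmt_tx descriptors */
	int max_num_pending_tx;        /* global descriptor limit */
	int max_probe_resp_desc_thres; /* e.g. 24 on 10.4 firmware, 0 = no limit */
};

/* Generic check applied to every frame on the HTT path. */
static int tx_inc_pending(struct htt_model *htt)
{
	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -1;             /* -EBUSY in the driver */
	htt->num_pending_tx++;
	return 0;
}

/* Management-only check, now done only on the htt_mgmt_tx path. */
static int tx_mgmt_inc_pending(struct htt_model *htt, bool is_mgmt, bool is_presp)
{
	if (!is_mgmt || !htt->max_probe_resp_desc_thres)
		return 0;              /* nothing to account */
	if (is_presp &&
	    htt->max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -1;             /* too many queued probe responses */
	htt->num_pending_mgmt_tx++;
	return 0;
}

int main(void)
{
	/* Arbitrary limits chosen for the example. */
	struct htt_model htt = { .max_num_pending_tx = 64,
				 .max_probe_resp_desc_thres = 24 };

	/* A probe response takes the generic slot first, then the mgmt slot. */
	if (!tx_inc_pending(&htt) && !tx_mgmt_inc_pending(&htt, true, true))
		printf("pending=%d mgmt=%d\n",
		       htt.num_pending_tx, htt.num_pending_mgmt_tx);
	return 0;
}

If the second check fails, the caller is expected to undo the first increment, mirroring the error path added to ath10k_mac_op_tx in the diff below.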
@@ -1776,10 +1776,10 @@ void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
 void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
                              struct ieee80211_txq *txq);
 void ath10k_htt_tx_txq_sync(struct ath10k *ar);
-void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
-                               bool is_mgmt);
-int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
-                              bool is_mgmt,
-                              bool is_presp);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+                                   bool is_presp);
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
@@ -2354,7 +2354,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
-               ath10k_txrx_tx_unref(htt, &tx_done);
+               status = ath10k_txrx_tx_unref(htt, &tx_done);
+               if (!status) {
+                       spin_lock_bh(&htt->tx_lock);
+                       ath10k_htt_tx_mgmt_dec_pending(htt);
+                       spin_unlock_bh(&htt->tx_lock);
+               }
                ath10k_mac_tx_push_pending(ar);
                break;
        }
@@ -149,46 +149,58 @@ void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
        spin_unlock_bh(&ar->htt.tx_lock);
 }
-void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
-                              bool is_mgmt)
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
 {
        lockdep_assert_held(&htt->tx_lock);
-       if (is_mgmt)
-               htt->num_pending_mgmt_tx--;
        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 }
-int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
-                             bool is_mgmt,
-                             bool is_presp)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+       lockdep_assert_held(&htt->tx_lock);
+       if (htt->num_pending_tx >= htt->max_num_pending_tx)
+               return -EBUSY;
+       htt->num_pending_tx++;
+       if (htt->num_pending_tx == htt->max_num_pending_tx)
+               ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+       return 0;
+}
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+                                  bool is_presp)
 {
        struct ath10k *ar = htt->ar;
        lockdep_assert_held(&htt->tx_lock);
-       if (htt->num_pending_tx >= htt->max_num_pending_tx)
-               return -EBUSY;
+       if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+               return 0;
-       if (is_mgmt &&
-           is_presp &&
-           ar->hw_params.max_probe_resp_desc_thres &&
+       if (is_presp &&
            ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
                return -EBUSY;
-       if (is_mgmt)
-               htt->num_pending_mgmt_tx++;
-       htt->num_pending_tx++;
-       if (htt->num_pending_tx == htt->max_num_pending_tx)
-               ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+       htt->num_pending_mgmt_tx++;
        return 0;
 }
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+       lockdep_assert_held(&htt->tx_lock);
+       if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+               return;
+       htt->num_pending_mgmt_tx--;
+}
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 {
        struct ath10k *ar = htt->ar;
@@ -3699,8 +3699,6 @@ static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
                           struct ieee80211_txq *txq)
 {
-       const bool is_mgmt = false;
-       const bool is_presp = false;
        struct ath10k *ar = hw->priv;
        struct ath10k_htt *htt = &ar->htt;
        struct ath10k_txq *artxq = (void *)txq->drv_priv;
@@ -3713,7 +3711,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
        int ret;
        spin_lock_bh(&ar->htt.tx_lock);
-       ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp);
+       ret = ath10k_htt_tx_inc_pending(htt);
        spin_unlock_bh(&ar->htt.tx_lock);
        if (ret)
@@ -3722,7 +3720,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
        skb = ieee80211_tx_dequeue(hw, txq);
        if (!skb) {
                spin_lock_bh(&ar->htt.tx_lock);
-               ath10k_htt_tx_dec_pending(htt, is_mgmt);
+               ath10k_htt_tx_dec_pending(htt);
                spin_unlock_bh(&ar->htt.tx_lock);
                return -ENOENT;
@@ -3739,7 +3737,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
                ath10k_warn(ar, "failed to push frame: %d\n", ret);
                spin_lock_bh(&ar->htt.tx_lock);
-               ath10k_htt_tx_dec_pending(htt, is_mgmt);
+               ath10k_htt_tx_dec_pending(htt);
                spin_unlock_bh(&ar->htt.tx_lock);
                return ret;
@@ -3978,14 +3976,13 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
        txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
        is_htt = (txpath == ATH10K_MAC_TX_HTT ||
                  txpath == ATH10K_MAC_TX_HTT_MGMT);
+       is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
        if (is_htt) {
                spin_lock_bh(&ar->htt.tx_lock);
-               is_mgmt = ieee80211_is_mgmt(hdr->frame_control);
                is_presp = ieee80211_is_probe_resp(hdr->frame_control);
-               ret = ath10k_htt_tx_inc_pending(htt, is_mgmt, is_presp);
+               ret = ath10k_htt_tx_inc_pending(htt);
                if (ret) {
                        ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
                                    ret);
@@ -3994,6 +3991,15 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
                        return;
                }
+               ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+               if (ret) {
+                       ath10k_warn(ar, "failed to increase tx mgmt pending count: %d, dropping\n",
+                                   ret);
+                       ath10k_htt_tx_dec_pending(htt);
+                       spin_unlock_bh(&ar->htt.tx_lock);
+                       ieee80211_free_txskb(ar->hw, skb);
+                       return;
+               }
                spin_unlock_bh(&ar->htt.tx_lock);
        }
@@ -4002,7 +4008,9 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
                ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
                if (is_htt) {
                        spin_lock_bh(&ar->htt.tx_lock);
-                       ath10k_htt_tx_dec_pending(htt, is_mgmt);
+                       ath10k_htt_tx_dec_pending(htt);
+                       if (is_mgmt)
+                               ath10k_htt_tx_mgmt_dec_pending(htt);
                        spin_unlock_bh(&ar->htt.tx_lock);
                }
                return;
@@ -49,7 +49,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
        spin_unlock_bh(&ar->data_lock);
 }
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
-                         const struct htt_tx_done *tx_done)
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+                        const struct htt_tx_done *tx_done)
 {
        struct ath10k *ar = htt->ar;
@@ -59,7 +59,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        struct ath10k_skb_cb *skb_cb;
        struct ath10k_txq *artxq;
        struct sk_buff *msdu;
-       bool limit_mgmt_desc = false;
        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
@@ -69,7 +68,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        if (tx_done->msdu_id >= htt->max_num_pending_tx) {
                ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
                            tx_done->msdu_id);
-               return;
+               return -EINVAL;
        }
        spin_lock_bh(&htt->tx_lock);
@@ -78,22 +77,18 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
                            tx_done->msdu_id);
                spin_unlock_bh(&htt->tx_lock);
-               return;
+               return -ENOENT;
        }
        skb_cb = ATH10K_SKB_CB(msdu);
        txq = skb_cb->txq;
        artxq = (void *)txq->drv_priv;
-       if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) &&
-           ar->hw_params.max_probe_resp_desc_thres)
-               limit_mgmt_desc = true;
        if (txq)
                artxq->num_fw_queued--;
        ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-       ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+       ath10k_htt_tx_dec_pending(htt);
        if (htt->num_pending_tx == 0)
                wake_up(&htt->empty_tx_wq);
        spin_unlock_bh(&htt->tx_lock);
@@ -108,7 +103,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        if (tx_done->discard) {
                ieee80211_free_txskb(htt->ar->hw, msdu);
-               return;
+               return 0;
        }
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -122,6 +117,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */
+       return 0;
 }
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -19,7 +19,7 @@
 #include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
-                         const struct htt_tx_done *tx_done);
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+                        const struct htt_tx_done *tx_done);
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,