Commit 1131ad7f authored by Yan-Hsuan Chuang, committed by Kalle Valo

rtw88: flush hardware tx queues

Sometimes mac80211 will ask us to flush the hardware queues.
To flush them, first we need to get the corresponding priority queues
from the RQPN mapping table.

Then we can check the available pages are equal to the originally
reserved pages, which means the hardware has returned all of the pages
it used to transmit.

Note that now we only check for 100 ms for the priority queue, but
sometimes if we have a lot of traffic (ex. 100Mbps up), some of the
packets could be dropped.
Signed-off-by: Yan-Hsuan Chuang <yhchuang@realtek.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent 127eef1d
......@@ -719,6 +719,93 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
return ret;
}
/* Translate a mac80211 AC queue bitmap into the corresponding DMA
 * priority queue bitmap, using the RQPN mapping table that was stored
 * when the tx DMA queues were configured.
 *
 * @rtwdev: the rtw device
 * @queues: bitmap of BIT(IEEE80211_AC_*) queues to translate
 *
 * Return: bitmap of BIT(RTW_DMA_MAPPING_*) priority queues
 */
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_mask = 0;

	prio_mask |= (queues & BIT(IEEE80211_AC_VO)) ? BIT(rqpn->dma_map_vo) : 0;
	prio_mask |= (queues & BIT(IEEE80211_AC_VI)) ? BIT(rqpn->dma_map_vi) : 0;
	prio_mask |= (queues & BIT(IEEE80211_AC_BE)) ? BIT(rqpn->dma_map_be) : 0;
	prio_mask |= (queues & BIT(IEEE80211_AC_BK)) ? BIT(rqpn->dma_map_bk) : 0;

	return prio_mask;
}
/* Wait for a single DMA priority queue to drain, i.e. until the hardware
 * has returned every tx page it borrowed from the queue's reserved pool.
 *
 * @rtwdev: the rtw device
 * @prio_queue: one of the RTW_DMA_MAPPING_* priority queues
 * @drop: true when the caller intends to drop pending frames, so a
 *	  flush timeout is tolerated and no warning is printed
 */
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	u32 addr;
	u16 avail_page, rsvd_page;
	int i;

	/* Each priority queue exposes its page accounting via its own
	 * FIFOPAGE_INFO register: the reserved page count is read at
	 * `addr` and the currently available count at `addr + 2`.
	 */
	switch (prio_queue) {
	case RTW_DMA_MAPPING_EXTRA:
		addr = REG_FIFOPAGE_INFO_4;
		break;
	case RTW_DMA_MAPPING_LOW:
		addr = REG_FIFOPAGE_INFO_2;
		break;
	case RTW_DMA_MAPPING_NORMAL:
		addr = REG_FIFOPAGE_INFO_3;
		break;
	case RTW_DMA_MAPPING_HIGH:
		addr = REG_FIFOPAGE_INFO_1;
		break;
	default:
		/* not a pollable priority queue; nothing to flush */
		return;
	}

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = rtw_read16(rtwdev, addr);
		avail_page = rtw_read16(rtwdev, addr + 2);
		/* every reserved page is back in the pool: queue is empty */
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}
/* Flush, one by one, every priority queue whose bit is set in
 * @prio_queues (a bitmap of BIT(RTW_DMA_MAPPING_*) values).
 */
static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 queue;

	for (queue = 0; queue < RTW_DMA_MAPPING_MAX; queue++) {
		if (!(prio_queues & BIT(queue)))
			continue;

		__rtw_mac_flush_prio_queue(rtwdev, queue, drop);
	}
}
/* Flush the hardware tx queues requested by mac80211.
 *
 * @rtwdev: the rtw device
 * @queues: bitmap of BIT(IEEE80211_AC_*) queues to flush
 * @drop: true if pending frames may be dropped instead of waiting
 */
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 all_queues = BIT(rtwdev->hw->queues) - 1;
	u32 prio_queues;

	/* Fall back to flushing every priority queue when the caller asks
	 * for all of the hardware queues, or when the RQPN mapping has not
	 * been set up yet and no translation is possible.
	 */
	if (queues == all_queues || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
......@@ -743,6 +830,7 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
return -EINVAL;
}
rtwdev->fifo.rqpn = rqpn;
txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
......
......@@ -31,5 +31,6 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev);
void rtw_mac_power_off(struct rtw_dev *rtwdev);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
#endif
......@@ -589,6 +589,19 @@ static void rtw_ops_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
/* mac80211 .flush callback: wait for (or drop) frames pending in the
 * hardware tx queues selected by @queues.
 */
static void rtw_ops_flush(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  u32 queues, bool drop)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);

	/* wake the chip out of deep LPS before polling tx page registers */
	rtw_leave_lps_deep(rtwdev);
	rtw_mac_flush_queues(rtwdev, queues, drop);

	mutex_unlock(&rtwdev->mutex);
}
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
......@@ -608,5 +621,6 @@ const struct ieee80211_ops rtw_ops = {
.mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
.set_rts_threshold = rtw_ops_set_rts_threshold,
.sta_statistics = rtw_ops_sta_statistics,
.flush = rtw_ops_flush,
};
EXPORT_SYMBOL(rtw_ops);
......@@ -780,6 +780,7 @@ enum rtw_dma_mapping {
RTW_DMA_MAPPING_NORMAL = 2,
RTW_DMA_MAPPING_HIGH = 3,
RTW_DMA_MAPPING_MAX,
RTW_DMA_MAPPING_UNDEF,
};
......@@ -1286,7 +1287,7 @@ struct rtw_fifo_conf {
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
enum rtw_dma_mapping pq_map[RTW_PQ_MAP_NUM];
struct rtw_rqpn *rqpn;
};
struct rtw_fw_state {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment