Commit fe5b5ab5 authored by Felix Fietkau

mt76: unify queue tx cleanup code

Cleanup and preparation for changing tx scheduling behavior
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 16254fc5
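Before the diff itself, a minimal sketch of the completion-loop shape that the sdio.c and usb.c hunks below converge on once the shared helper exists. This is an illustration only, not part of the commit; the function name example_drain_tx_queue is hypothetical, while the struct fields and mt76_queue_tx_complete() mirror the hunks that follow.

/* Illustration only: drain completed tx entries via the new shared helper. */
static void example_drain_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
    struct mt76_queue_entry entry;

    while (q->queued > 0) {
        /* Stop at the first entry the hardware has not completed yet. */
        if (!q->entry[q->tail].done)
            break;

        /* Snapshot the entry and clear its per-slot flags ... */
        entry = q->entry[q->tail];
        q->entry[q->tail].done = false;
        q->entry[q->tail].schedule = false;

        /* ... then let the helper complete the skb, advance q->tail and
         * update q->queued / swq_queued under q->lock.
         */
        mt76_queue_tx_complete(dev, q, &entry);
    }
}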
@@ -165,16 +165,8 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
     last = readl(&q->regs->dma_idx);
 
     while (q->queued > 0 && q->tail != last) {
-        int swq_qid = -1;
-
         mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
-        if (entry.schedule)
-            swq_qid = entry.qid;
-
-        q->tail = (q->tail + 1) % q->ndesc;
-
-        if (entry.skb)
-            dev->drv->tx_complete_skb(dev, qid, &entry);
+        mt76_queue_tx_complete(dev, q, &entry);
 
         if (entry.txwi) {
             if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
@@ -185,13 +177,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
         if (!flush && q->tail == last)
             last = readl(&q->regs->dma_idx);
 
-        spin_lock_bh(&q->lock);
-        if (swq_qid >= 4)
-            dev->q_tx[__MT_TXQ_MAX + swq_qid - 4].swq_queued--;
-        else if (swq_qid >= 0)
-            dev->q_tx[swq_qid].swq_queued--;
-        q->queued--;
-        spin_unlock_bh(&q->lock);
     }
 
     if (flush) {
@@ -1017,6 +1017,8 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
                            struct napi_struct *napi);
 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
 void mt76_testmode_tx_pending(struct mt76_dev *dev);
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+                            struct mt76_queue_entry *e);
 
 /* usb */
 static inline bool mt76u_urb_error(struct urb *urb)
@@ -133,38 +133,28 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
     return nframes;
 }
 
-static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
 {
     struct mt76_sw_queue *sq = &dev->q_tx[qid];
-    u32 n_dequeued = 0, n_sw_dequeued = 0;
     struct mt76_queue_entry entry;
     struct mt76_queue *q = sq->q;
     bool wake;
 
-    while (q->queued > n_dequeued) {
+    while (q->queued > 0) {
         if (!q->entry[q->tail].done)
             break;
 
-        if (q->entry[q->tail].schedule) {
-            q->entry[q->tail].schedule = false;
-            n_sw_dequeued++;
-        }
-
         entry = q->entry[q->tail];
         q->entry[q->tail].done = false;
-        q->tail = (q->tail + 1) % q->ndesc;
-        n_dequeued++;
+        q->entry[q->tail].schedule = false;
 
-        if (qid == MT_TXQ_MCU)
+        if (qid == MT_TXQ_MCU) {
             dev_kfree_skb(entry.skb);
-        else
-            dev->drv->tx_complete_skb(dev, qid, &entry);
-    }
-
-    spin_lock_bh(&q->lock);
+            entry.skb = NULL;
+        }
 
-    sq->swq_queued -= n_sw_dequeued;
-    q->queued -= n_dequeued;
+        mt76_queue_tx_complete(dev, q, &entry);
+    }
 
     wake = q->stopped && q->queued < q->ndesc - 8;
     if (wake)
@@ -173,18 +163,13 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
     if (!q->queued)
         wake_up(&dev->tx_wait);
 
-    spin_unlock_bh(&q->lock);
-
     if (qid == MT_TXQ_MCU)
-        goto out;
+        return;
 
     mt76_txq_schedule(&dev->phy, qid);
 
     if (wake)
         ieee80211_wake_queue(dev->hw, qid);
-
-out:
-    return n_dequeued;
 }
 
 static void mt76s_tx_status_data(struct work_struct *work)
@@ -696,3 +696,25 @@ int mt76_skb_adjust_pad(struct sk_buff *skb)
     return 0;
 }
 EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+                            struct mt76_queue_entry *e)
+{
+    enum mt76_txq_id qid = e->qid % 4;
+    bool ext_phy = e->qid >= 4;
+
+    if (e->skb)
+        dev->drv->tx_complete_skb(dev, qid, e);
+
+    spin_lock_bh(&q->lock);
+    q->tail = (q->tail + 1) % q->ndesc;
+    q->queued--;
+
+    if (ext_phy)
+        qid += __MT_TXQ_MAX;
+
+    if (e->schedule)
+        dev->q_tx[qid].swq_queued--;
+    spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
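As a reading aid for the helper just added above (again not part of the commit), the qid arithmetic can be traced with an arbitrary example value; the comment below only restates what the function body does.

/*
 * Example trace of mt76_queue_tx_complete() for e->qid == 6:
 *
 *   qid     = 6 % 4  -> 2       AC index passed to dev->drv->tx_complete_skb()
 *   ext_phy = 6 >= 4 -> true    the entry belongs to the extension phy
 *   qid    += __MT_TXQ_MAX      select the ext-phy slice of dev->q_tx[]
 *
 * so, when e->schedule is set, dev->q_tx[__MT_TXQ_MAX + 2].swq_queued is
 * decremented while q->lock is held.
 */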
@@ -802,33 +802,20 @@ static void mt76u_tx_tasklet(unsigned long data)
     int i;
 
     for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-        u32 n_dequeued = 0, n_sw_dequeued = 0;
-
         sq = &dev->q_tx[i];
         q = sq->q;
 
-        while (q->queued > n_dequeued) {
+        while (q->queued > 0) {
             if (!q->entry[q->tail].done)
                 break;
 
-            if (q->entry[q->tail].schedule) {
-                q->entry[q->tail].schedule = false;
-                n_sw_dequeued++;
-            }
-
             entry = q->entry[q->tail];
             q->entry[q->tail].done = false;
-            q->tail = (q->tail + 1) % q->ndesc;
-            n_dequeued++;
+            q->entry[q->tail].schedule = false;
 
-            dev->drv->tx_complete_skb(dev, i, &entry);
+            mt76_queue_tx_complete(dev, q, &entry);
         }
 
-        spin_lock_bh(&q->lock);
-
-        sq->swq_queued -= n_sw_dequeued;
-        q->queued -= n_dequeued;
-
         wake = q->stopped && q->queued < q->ndesc - 8;
         if (wake)
             q->stopped = false;
@@ -836,8 +823,6 @@ static void mt76u_tx_tasklet(unsigned long data)
         if (!q->queued)
             wake_up(&dev->tx_wait);
 
-        spin_unlock_bh(&q->lock);
-
         mt76_txq_schedule(&dev->phy, i);
 
         if (dev->drv->tx_status_data &&
@@ -1068,16 +1053,11 @@ void mt76u_stop_tx(struct mt76_dev *dev)
             if (!q)
                 continue;
 
-            /* Assure we are in sync with killed tasklet. */
-            spin_lock_bh(&q->lock);
-
             while (q->queued) {
-                entry = q->entry[q->tail];
-                q->tail = (q->tail + 1) % q->ndesc;
-                q->queued--;
-
-                dev->drv->tx_complete_skb(dev, i, &entry);
+                entry = q->entry[q->tail];
+                q->entry[q->tail].done = false;
+                q->entry[q->tail].schedule = false;
+                mt76_queue_tx_complete(dev, q, &entry);
             }
-            spin_unlock_bh(&q->lock);
         }
     }