Commit fe5b5ab5 authored by Felix Fietkau

mt76: unify queue tx cleanup code

Cleanup and preparation for changing tx scheduling behavior
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 16254fc5
...@@ -165,16 +165,8 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) ...@@ -165,16 +165,8 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
last = readl(&q->regs->dma_idx); last = readl(&q->regs->dma_idx);
while (q->queued > 0 && q->tail != last) { while (q->queued > 0 && q->tail != last) {
int swq_qid = -1;
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule) mt76_queue_tx_complete(dev, q, &entry);
swq_qid = entry.qid;
q->tail = (q->tail + 1) % q->ndesc;
if (entry.skb)
dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) { if (entry.txwi) {
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE)) if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
...@@ -185,13 +177,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) ...@@ -185,13 +177,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (!flush && q->tail == last) if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx); last = readl(&q->regs->dma_idx);
spin_lock_bh(&q->lock);
if (swq_qid >= 4)
dev->q_tx[__MT_TXQ_MAX + swq_qid - 4].swq_queued--;
else if (swq_qid >= 0)
dev->q_tx[swq_qid].swq_queued--;
q->queued--;
spin_unlock_bh(&q->lock);
} }
if (flush) { if (flush) {
......
...@@ -1017,6 +1017,8 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, ...@@ -1017,6 +1017,8 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi); struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_dev *dev); void mt76_testmode_tx_pending(struct mt76_dev *dev);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
/* usb */ /* usb */
static inline bool mt76u_urb_error(struct urb *urb) static inline bool mt76u_urb_error(struct urb *urb)
......
...@@ -133,38 +133,28 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) ...@@ -133,38 +133,28 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
return nframes; return nframes;
} }
static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{ {
struct mt76_sw_queue *sq = &dev->q_tx[qid]; struct mt76_sw_queue *sq = &dev->q_tx[qid];
u32 n_dequeued = 0, n_sw_dequeued = 0;
struct mt76_queue_entry entry; struct mt76_queue_entry entry;
struct mt76_queue *q = sq->q; struct mt76_queue *q = sq->q;
bool wake; bool wake;
while (q->queued > n_dequeued) { while (q->queued > 0) {
if (!q->entry[q->tail].done) if (!q->entry[q->tail].done)
break; break;
if (q->entry[q->tail].schedule) {
q->entry[q->tail].schedule = false;
n_sw_dequeued++;
}
entry = q->entry[q->tail]; entry = q->entry[q->tail];
q->entry[q->tail].done = false; q->entry[q->tail].done = false;
q->tail = (q->tail + 1) % q->ndesc; q->entry[q->tail].schedule = false;
n_dequeued++;
if (qid == MT_TXQ_MCU) if (qid == MT_TXQ_MCU) {
dev_kfree_skb(entry.skb); dev_kfree_skb(entry.skb);
else entry.skb = NULL;
dev->drv->tx_complete_skb(dev, qid, &entry);
} }
spin_lock_bh(&q->lock); mt76_queue_tx_complete(dev, q, &entry);
}
sq->swq_queued -= n_sw_dequeued;
q->queued -= n_dequeued;
wake = q->stopped && q->queued < q->ndesc - 8; wake = q->stopped && q->queued < q->ndesc - 8;
if (wake) if (wake)
...@@ -173,18 +163,13 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) ...@@ -173,18 +163,13 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
if (!q->queued) if (!q->queued)
wake_up(&dev->tx_wait); wake_up(&dev->tx_wait);
spin_unlock_bh(&q->lock);
if (qid == MT_TXQ_MCU) if (qid == MT_TXQ_MCU)
goto out; return;
mt76_txq_schedule(&dev->phy, qid); mt76_txq_schedule(&dev->phy, qid);
if (wake) if (wake)
ieee80211_wake_queue(dev->hw, qid); ieee80211_wake_queue(dev->hw, qid);
out:
return n_dequeued;
} }
static void mt76s_tx_status_data(struct work_struct *work) static void mt76s_tx_status_data(struct work_struct *work)
......
...@@ -696,3 +696,25 @@ int mt76_skb_adjust_pad(struct sk_buff *skb) ...@@ -696,3 +696,25 @@ int mt76_skb_adjust_pad(struct sk_buff *skb)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad); EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
/* Complete one TX ring entry: invoke the driver's skb completion hook,
 * advance the ring tail, and decrement the hardware-queue and (if the
 * entry was scheduled) software-queue counters under the queue lock.
 * This is the common cleanup path shared by the DMA, SDIO and USB
 * TX cleanup loops introduced by this commit.
 */
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e)
{
/* e->qid encodes both the TX queue id (low 2 bits worth, % 4) and
 * an ext-phy flag (qid >= 4), as the old per-driver loops assumed. */
enum mt76_txq_id qid = e->qid % 4;
bool ext_phy = e->qid >= 4;
/* Run the completion callback before updating counters so the driver
 * sees the entry in its still-queued state. */
if (e->skb)
dev->drv->tx_complete_skb(dev, qid, e);
spin_lock_bh(&q->lock);
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
/* Ext-phy software queues are stored after __MT_TXQ_MAX in q_tx[],
 * mirroring the old "__MT_TXQ_MAX + swq_qid - 4" indexing. */
if (ext_phy)
qid += __MT_TXQ_MAX;
/* Only scheduled entries hold a swq_queued reference. */
if (e->schedule)
dev->q_tx[qid].swq_queued--;
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
...@@ -802,33 +802,20 @@ static void mt76u_tx_tasklet(unsigned long data) ...@@ -802,33 +802,20 @@ static void mt76u_tx_tasklet(unsigned long data)
int i; int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) { for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u32 n_dequeued = 0, n_sw_dequeued = 0;
sq = &dev->q_tx[i]; sq = &dev->q_tx[i];
q = sq->q; q = sq->q;
while (q->queued > n_dequeued) { while (q->queued > 0) {
if (!q->entry[q->tail].done) if (!q->entry[q->tail].done)
break; break;
if (q->entry[q->tail].schedule) {
q->entry[q->tail].schedule = false;
n_sw_dequeued++;
}
entry = q->entry[q->tail]; entry = q->entry[q->tail];
q->entry[q->tail].done = false; q->entry[q->tail].done = false;
q->tail = (q->tail + 1) % q->ndesc; q->entry[q->tail].schedule = false;
n_dequeued++;
dev->drv->tx_complete_skb(dev, i, &entry); mt76_queue_tx_complete(dev, q, &entry);
} }
spin_lock_bh(&q->lock);
sq->swq_queued -= n_sw_dequeued;
q->queued -= n_dequeued;
wake = q->stopped && q->queued < q->ndesc - 8; wake = q->stopped && q->queued < q->ndesc - 8;
if (wake) if (wake)
q->stopped = false; q->stopped = false;
...@@ -836,8 +823,6 @@ static void mt76u_tx_tasklet(unsigned long data) ...@@ -836,8 +823,6 @@ static void mt76u_tx_tasklet(unsigned long data)
if (!q->queued) if (!q->queued)
wake_up(&dev->tx_wait); wake_up(&dev->tx_wait);
spin_unlock_bh(&q->lock);
mt76_txq_schedule(&dev->phy, i); mt76_txq_schedule(&dev->phy, i);
if (dev->drv->tx_status_data && if (dev->drv->tx_status_data &&
...@@ -1068,16 +1053,11 @@ void mt76u_stop_tx(struct mt76_dev *dev) ...@@ -1068,16 +1053,11 @@ void mt76u_stop_tx(struct mt76_dev *dev)
if (!q) if (!q)
continue; continue;
/* Assure we are in sync with killed tasklet. */
spin_lock_bh(&q->lock);
while (q->queued) {
entry = q->entry[q->tail]; entry = q->entry[q->tail];
q->tail = (q->tail + 1) % q->ndesc; q->entry[q->tail].done = false;
q->queued--; q->entry[q->tail].schedule = false;
dev->drv->tx_complete_skb(dev, i, &entry); mt76_queue_tx_complete(dev, q, &entry);
}
spin_unlock_bh(&q->lock);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment