Commit 8357f0dc authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: mt7615: use napi polling for tx cleanup

This allows tx scheduling and tx cleanup to run concurrently.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 9e63f5e7
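
The change follows the usual NAPI deferral pattern: the hard interrupt masks the TX-done source and schedules a NAPI context, the poll callback reaps completed TX descriptors, re-arms the interrupt through napi_complete_done(), and then kicks the tx tasklet so queue scheduling and descriptor cleanup can proceed in parallel. A rough, self-contained sketch of that flow follows; my_dev, my_reap_tx and the my_irq_* helpers are placeholders for illustration, not mt76 symbols.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical device context, only loosely mirroring struct mt76_dev. */
struct my_dev {
	struct napi_struct tx_napi;
	struct tasklet_struct tx_tasklet;
};

/* Stubs standing in for the driver's real interrupt mask/unmask and
 * TX descriptor cleanup routines.
 */
static void my_irq_enable(struct my_dev *dev) { }
static void my_irq_disable(struct my_dev *dev) { }
static void my_reap_tx(struct my_dev *dev) { }

/* Hard IRQ: mask the TX-done source and hand the work to NAPI. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	my_irq_disable(dev);
	napi_schedule(&dev->tx_napi);
	return IRQ_HANDLED;
}

/* NAPI poll: reap completed TX descriptors, re-arm the interrupt via
 * napi_complete_done(), reap once more to close the window with frames
 * that completed while re-arming, then kick the tx tasklet so queue
 * scheduling runs concurrently with the next cleanup pass.
 */
static int my_poll_tx(struct napi_struct *napi, int budget)
{
	struct my_dev *dev = container_of(napi, struct my_dev, tx_napi);

	my_reap_tx(dev);

	if (napi_complete_done(napi, 0))
		my_irq_enable(dev);

	my_reap_tx(dev);
	tasklet_schedule(&dev->tx_tasklet);
	return 0;
}

Registration and teardown in the hunks below follow the standard shape as well: netif_tx_napi_add() plus napi_enable() during DMA init, and netif_napi_del() next to tasklet_kill() on cleanup.
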
@@ -93,18 +93,33 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 static void mt7615_tx_tasklet(unsigned long data)
 {
 	struct mt7615_dev *dev = (struct mt7615_dev *)data;
+
+	mt76_txq_schedule_all(&dev->mt76);
+}
+
+static int mt7615_poll_tx(struct napi_struct *napi, int budget)
+{
 	static const u8 queue_map[] = {
 		MT_TXQ_MCU,
 		MT_TXQ_BE
 	};
+	struct mt7615_dev *dev;
 	int i;
 
+	dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
+
 	for (i = 0; i < ARRAY_SIZE(queue_map); i++)
 		mt76_queue_tx_cleanup(dev, queue_map[i], false);
 
-	mt76_txq_schedule_all(&dev->mt76);
+	if (napi_complete_done(napi, 0))
+		mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
 
-	mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
+	for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+		mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+	tasklet_schedule(&dev->mt76.tx_tasklet);
+
+	return 0;
 }
 
 int mt7615_dma_init(struct mt7615_dev *dev)
@@ -178,6 +193,10 @@ int mt7615_dma_init(struct mt7615_dev *dev)
 	if (ret < 0)
 		return ret;
 
+	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
+			  mt7615_poll_tx, NAPI_POLL_WEIGHT);
+	napi_enable(&dev->mt76.tx_napi);
+
 	mt76_poll(dev, MT_WPDMA_GLO_CFG,
 		  MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
 		  MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
@@ -201,5 +220,6 @@ void mt7615_dma_cleanup(struct mt7615_dev *dev)
 	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
 
 	tasklet_kill(&dev->mt76.tx_tasklet);
+	netif_napi_del(&dev->mt76.tx_napi);
 	mt76_dma_cleanup(&dev->mt76);
 }
@@ -49,7 +49,7 @@ irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
 
 	if (intr & MT_INT_TX_DONE_ALL) {
 		mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
-		tasklet_schedule(&dev->mt76.tx_tasklet);
+		napi_schedule(&dev->mt76.tx_napi);
 	}
 
 	if (intr & MT_INT_RX_DONE(0)) {