Commit e17d7945 authored by Felix Fietkau

mt76: mt7615: significantly reduce interrupt load

On MT7615 and newer, DMA completion only triggers unmapping of queued skbs, but not freeing, since pointers to the packets are queued internally.
Because of that, there is no need to process the main data queue immediately on DMA completion.
To improve performance, mask out the DMA data queue completion interrupt and process the queue only when we receive a txfree event.
This brings the number of interrupts under load down to a small fraction.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent f8a667a9
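
For orientation, the post-patch TX completion path condenses to the sketch below. This is assembled from the hunks that follow, not text from the commit itself; the file attributions (dma.c, mmio.c, mac.c, mt7615.h) are inferred from the function names, since this view does not show the file headers, and surrounding code is elided.

/* mt7615.h: interrupt mask covering only the MCU ring's completion bit */
static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
{
	return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU].q->hw_idx);
}

/* mmio.c: the IRQ tasklet schedules TX NAPI for MCU completions only;
 * the data rings' TX_DONE bits are never enabled, so they no longer fire
 */
if (intr & tx_mcu_mask)
	napi_schedule(&dev->mt76.tx_napi);

/* dma.c: the TX NAPI poll now reaps just the MCU queue */
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);

/* mac.c: the data queues are reaped from the txfree event handler */
mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
if (is_mt7615(&dev->mt76)) {
	mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
} else {
	for (i = 0; i < IEEE80211_NUM_ACS; i++)
		mt76_queue_tx_cleanup(dev, i, false);
}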
@@ -94,34 +94,16 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
 	return 0;
 }
 
-static void
-mt7615_tx_cleanup(struct mt7615_dev *dev)
-{
-	int i;
-
-	mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
-	mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
-	if (is_mt7615(&dev->mt76)) {
-		mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
-	} else {
-		for (i = 0; i < IEEE80211_NUM_ACS; i++)
-			mt76_queue_tx_cleanup(dev, i, false);
-	}
-}
-
 static int mt7615_poll_tx(struct napi_struct *napi, int budget)
 {
 	struct mt7615_dev *dev;
 
 	dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
 
-	mt7615_tx_cleanup(dev);
-
-	mt7615_pm_power_save_sched(dev);
-	tasklet_schedule(&dev->mt76.tx_tasklet);
+	mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
 
 	if (napi_complete_done(napi, 0))
-		mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
+		mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
 
 	return 0;
 }
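
(The data-queue cleanup removed here does not go away: the same PSD/AC-queue loop reappears in the mt7615_mac_tx_free() hunk further down, so data frames are now reaped when the firmware reports them freed via a txfree event. Only the MCU queue, presumably because its completions are not reported through txfree events, is still reaped from the NAPI poll.)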
@@ -305,7 +287,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
 		 MT_WPDMA_GLO_CFG_RX_DMA_EN);
 
 	/* enable interrupts for TX/RX rings */
-	mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+	mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) |
 			  MT_INT_MCU_CMD);
 
 	if (is_mt7622(&dev->mt76))
@@ -1424,6 +1424,14 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
 	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
 	u8 i, count;
 
+	mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+	if (is_mt7615(&dev->mt76)) {
+		mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+	} else {
+		for (i = 0; i < IEEE80211_NUM_ACS; i++)
+			mt76_queue_tx_cleanup(dev, i, false);
+	}
+
 	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
 	if (is_mt7615(&dev->mt76)) {
 		__le16 *token = &free->token[0];
@@ -101,7 +101,7 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
 static void mt7615_irq_tasklet(unsigned long data)
 {
 	struct mt7615_dev *dev = (struct mt7615_dev *)data;
-	u32 intr, mask = 0;
+	u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
 
 	mt76_wr(dev, MT_INT_MASK_CSR, 0);
@@ -112,11 +112,11 @@ static void mt7615_irq_tasklet(unsigned long data)
 	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
 
 	mask |= intr & MT_INT_RX_DONE_ALL;
-	if (intr & MT_INT_TX_DONE_ALL)
-		mask |= MT_INT_TX_DONE_ALL;
+	if (intr & tx_mcu_mask)
+		mask |= tx_mcu_mask;
 	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
 
-	if (intr & MT_INT_TX_DONE_ALL)
+	if (intr & tx_mcu_mask)
 		napi_schedule(&dev->mt76.tx_napi);
 
 	if (intr & MT_INT_RX_DONE(0))
@@ -540,6 +540,11 @@ static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
 	return lmac_queue_map[ac];
 }
 
+static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
+{
+	return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU].q->hw_idx);
+}
+
 void mt7615_dma_reset(struct mt7615_dev *dev);
 void mt7615_scan_work(struct work_struct *work);
 void mt7615_roc_work(struct work_struct *work);
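
One design note on the new helper (an observation, not part of the commit text): because mt7615_tx_mcu_int_mask() reads the MCU queue's hw_idx at runtime, the single enabled TX_DONE bit automatically tracks whichever hardware ring MT_TXQ_MCU maps to on the current chip, rather than hardcoding a chip-specific MT_INT_TX_DONE() argument. The call sites above simply use it everywhere MT_INT_TX_DONE_ALL previously appeared on the TX path.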