Commit 2e420b88 authored by Lorenzo Bianconi, committed by Felix Fietkau

wifi: mt76: introduce wed pointer in mt76_queue

Introduce mtk_wed_device pointer in mt76_queue structure in order to
configure WED chip.
Get rid of dev parameter in Q_READ and Q_WRITE macros.
Introduce wed parameter to the following routine signatures:
- mt76_init_queue
- mt76_init_tx_queue

This is a preliminary patch to introduce WED support for mt7996 since
mt7996 runs two separate WED chips.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 132d74d3
...@@ -9,11 +9,11 @@ ...@@ -9,11 +9,11 @@
#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) #if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
#define Q_READ(_dev, _q, _field) ({ \ #define Q_READ(_q, _field) ({ \
u32 _offset = offsetof(struct mt76_queue_regs, _field); \ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
u32 _val; \ u32 _val; \
if ((_q)->flags & MT_QFLAG_WED) \ if ((_q)->flags & MT_QFLAG_WED) \
_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \ _val = mtk_wed_device_reg_read((_q)->wed, \
((_q)->wed_regs + \ ((_q)->wed_regs + \
_offset)); \ _offset)); \
else \ else \
...@@ -21,10 +21,10 @@ ...@@ -21,10 +21,10 @@
_val; \ _val; \
}) })
#define Q_WRITE(_dev, _q, _field, _val) do { \ #define Q_WRITE(_q, _field, _val) do { \
u32 _offset = offsetof(struct mt76_queue_regs, _field); \ u32 _offset = offsetof(struct mt76_queue_regs, _field); \
if ((_q)->flags & MT_QFLAG_WED) \ if ((_q)->flags & MT_QFLAG_WED) \
mtk_wed_device_reg_write(&(_dev)->mmio.wed, \ mtk_wed_device_reg_write((_q)->wed, \
((_q)->wed_regs + _offset), \ ((_q)->wed_regs + _offset), \
_val); \ _val); \
else \ else \
...@@ -33,8 +33,8 @@ ...@@ -33,8 +33,8 @@
#else #else
#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field) #define Q_READ(_q, _field) readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field) #define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)
#endif #endif
...@@ -188,9 +188,9 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi); ...@@ -188,9 +188,9 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{ {
Q_WRITE(dev, q, desc_base, q->desc_dma); Q_WRITE(q, desc_base, q->desc_dma);
Q_WRITE(dev, q, ring_size, q->ndesc); Q_WRITE(q, ring_size, q->ndesc);
q->head = Q_READ(dev, q, dma_idx); q->head = Q_READ(q, dma_idx);
q->tail = q->head; q->tail = q->head;
} }
...@@ -206,8 +206,8 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) ...@@ -206,8 +206,8 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
for (i = 0; i < q->ndesc; i++) for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
Q_WRITE(dev, q, cpu_idx, 0); Q_WRITE(q, cpu_idx, 0);
Q_WRITE(dev, q, dma_idx, 0); Q_WRITE(q, dma_idx, 0);
mt76_dma_sync_idx(dev, q); mt76_dma_sync_idx(dev, q);
} }
...@@ -343,7 +343,7 @@ static void ...@@ -343,7 +343,7 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{ {
wmb(); wmb();
Q_WRITE(dev, q, cpu_idx, q->head); Q_WRITE(q, cpu_idx, q->head);
} }
static void static void
...@@ -359,7 +359,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) ...@@ -359,7 +359,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
if (flush) if (flush)
last = -1; last = -1;
else else
last = Q_READ(dev, q, dma_idx); last = Q_READ(q, dma_idx);
while (q->queued > 0 && q->tail != last) { while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
...@@ -371,7 +371,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) ...@@ -371,7 +371,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
} }
if (!flush && q->tail == last) if (!flush && q->tail == last)
last = Q_READ(dev, q, dma_idx); last = Q_READ(q, dma_idx);
} }
spin_unlock_bh(&q->cleanup_lock); spin_unlock_bh(&q->cleanup_lock);
...@@ -641,7 +641,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, ...@@ -641,7 +641,6 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{ {
#ifdef CONFIG_NET_MEDIATEK_SOC_WED #ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mmio.wed;
int ret, type, ring; int ret, type, ring;
u8 flags; u8 flags;
...@@ -649,7 +648,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) ...@@ -649,7 +648,7 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
return -EINVAL; return -EINVAL;
flags = q->flags; flags = q->flags;
if (!mtk_wed_device_active(wed)) if (!q->wed || !mtk_wed_device_active(q->wed))
q->flags &= ~MT_QFLAG_WED; q->flags &= ~MT_QFLAG_WED;
if (!(q->flags & MT_QFLAG_WED)) if (!(q->flags & MT_QFLAG_WED))
...@@ -660,9 +659,10 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) ...@@ -660,9 +659,10 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
switch (type) { switch (type) {
case MT76_WED_Q_TX: case MT76_WED_Q_TX:
ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset); ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
reset);
if (!ret) if (!ret)
q->wed_regs = wed->tx_ring[ring].reg_base; q->wed_regs = q->wed->tx_ring[ring].reg_base;
break; break;
case MT76_WED_Q_TXFREE: case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */ /* WED txfree queue needs ring to be initialized before setup */
...@@ -671,14 +671,15 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) ...@@ -671,14 +671,15 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
mt76_dma_rx_fill(dev, q, false); mt76_dma_rx_fill(dev, q, false);
q->flags = flags; q->flags = flags;
ret = mtk_wed_device_txfree_ring_setup(wed, q->regs); ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
if (!ret) if (!ret)
q->wed_regs = wed->txfree_ring.reg_base; q->wed_regs = q->wed->txfree_ring.reg_base;
break; break;
case MT76_WED_Q_RX: case MT76_WED_Q_RX:
ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset); ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
reset);
if (!ret) if (!ret)
q->wed_regs = wed->rx_ring[ring].reg_base; q->wed_regs = q->wed->rx_ring[ring].reg_base;
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
...@@ -819,7 +820,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) ...@@ -819,7 +820,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) && if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
mt76_queue_is_wed_tx_free(q)) { mt76_queue_is_wed_tx_free(q)) {
dma_idx = Q_READ(dev, q, dma_idx); dma_idx = Q_READ(q, dma_idx);
check_ddone = true; check_ddone = true;
} }
...@@ -829,7 +830,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) ...@@ -829,7 +830,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (check_ddone) { if (check_ddone) {
if (q->tail == dma_idx) if (q->tail == dma_idx)
dma_idx = Q_READ(dev, q, dma_idx); dma_idx = Q_READ(q, dma_idx);
if (q->tail == dma_idx) if (q->tail == dma_idx)
break; break;
......
...@@ -1736,7 +1736,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna); ...@@ -1736,7 +1736,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
struct mt76_queue * struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base, u32 flags) int ring_base, void *wed, u32 flags)
{ {
struct mt76_queue *hwq; struct mt76_queue *hwq;
int err; int err;
...@@ -1746,6 +1746,7 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, ...@@ -1746,6 +1746,7 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
hwq->flags = flags; hwq->flags = flags;
hwq->wed = wed;
err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base); err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
if (err < 0) if (err < 0)
......
...@@ -199,6 +199,7 @@ struct mt76_queue { ...@@ -199,6 +199,7 @@ struct mt76_queue {
u8 hw_idx; u8 hw_idx;
u8 flags; u8 flags;
struct mtk_wed_device *wed;
u32 wed_regs; u32 wed_regs;
dma_addr_t desc_dma; dma_addr_t desc_dma;
...@@ -1121,15 +1122,16 @@ int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep, ...@@ -1121,15 +1122,16 @@ int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
struct mt76_queue * struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base, u32 flags); int ring_base, void *wed, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, u16 mt76_calculate_default_rate(struct mt76_phy *phy,
struct ieee80211_vif *vif, int rateidx); struct ieee80211_vif *vif, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx, static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
int n_desc, int ring_base, u32 flags) int n_desc, int ring_base, void *wed,
u32 flags)
{ {
struct mt76_queue *q; struct mt76_queue *q;
q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags); q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
if (IS_ERR(q)) if (IS_ERR(q))
return PTR_ERR(q); return PTR_ERR(q);
...@@ -1143,7 +1145,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx, ...@@ -1143,7 +1145,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
{ {
struct mt76_queue *q; struct mt76_queue *q;
q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0); q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
if (IS_ERR(q)) if (IS_ERR(q))
return PTR_ERR(q); return PTR_ERR(q);
......
...@@ -173,13 +173,14 @@ int mt7603_dma_init(struct mt7603_dev *dev) ...@@ -173,13 +173,14 @@ int mt7603_dma_init(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i], ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0); MT7603_TX_RING_SIZE, MT_TX_RING_BASE,
NULL, 0);
if (ret) if (ret)
return ret; return ret;
} }
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0); MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
...@@ -189,12 +190,12 @@ int mt7603_dma_init(struct mt7603_dev *dev) ...@@ -189,12 +190,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret; return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN, ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0); MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC, ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0); MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
......
...@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev) ...@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i], ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2, MT7615_TX_RING_SIZE / 2,
MT_TX_RING_BASE, 0); MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
} }
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT, ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
MT7615_TX_MGMT_RING_SIZE, MT7615_TX_MGMT_RING_SIZE,
MT_TX_RING_BASE, 0); MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
...@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev) ...@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
return mt7622_init_tx_queues_multi(dev); return mt7622_init_tx_queues_multi(dev);
ret = mt76_connac_init_tx_queues(&dev->mphy, 0, MT7615_TX_RING_SIZE, ret = mt76_connac_init_tx_queues(&dev->mphy, 0, MT7615_TX_RING_SIZE,
MT_TX_RING_BASE, 0); MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
......
...@@ -391,7 +391,8 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm) ...@@ -391,7 +391,8 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss); void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss);
int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc, int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
int ring_base, u32 flags); int ring_base, void *wed, u32 flags);
void mt76_connac_write_hw_txp(struct mt76_dev *dev, void mt76_connac_write_hw_txp(struct mt76_dev *dev,
struct mt76_tx_info *tx_info, struct mt76_tx_info *tx_info,
void *txp_ptr, u32 id); void *txp_ptr, u32 id);
......
...@@ -256,11 +256,12 @@ void mt76_connac_txp_skb_unmap(struct mt76_dev *dev, ...@@ -256,11 +256,12 @@ void mt76_connac_txp_skb_unmap(struct mt76_dev *dev,
EXPORT_SYMBOL_GPL(mt76_connac_txp_skb_unmap); EXPORT_SYMBOL_GPL(mt76_connac_txp_skb_unmap);
int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc, int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
int ring_base, u32 flags) int ring_base, void *wed, u32 flags)
{ {
int i, err; int i, err;
err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base, flags); err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base,
wed, flags);
if (err < 0) if (err < 0)
return err; return err;
......
...@@ -199,13 +199,14 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) ...@@ -199,13 +199,14 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) { for (i = 0; i < IEEE80211_NUM_ACS; i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i), ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
MT76x02_TX_RING_SIZE, MT76x02_TX_RING_SIZE,
MT_TX_RING_BASE, 0); MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
} }
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0); MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE,
NULL, 0);
if (ret) if (ret)
return ret; return ret;
......
...@@ -9,18 +9,20 @@ static int ...@@ -9,18 +9,20 @@ static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base) mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{ {
struct mt7915_dev *dev = phy->dev; struct mt7915_dev *dev = phy->dev;
struct mtk_wed_device *wed = NULL;
if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
if (is_mt798x(&dev->mt76)) if (is_mt798x(&dev->mt76))
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE; ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
else else
ring_base = MT_WED_TX_RING_BASE; ring_base = MT_WED_TX_RING_BASE;
idx -= MT_TXQ_ID(0); idx -= MT_TXQ_ID(0);
wed = &dev->mt76.mmio.wed;
} }
return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base, return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
MT_WED_Q_TX(idx)); wed, MT_WED_Q_TX(idx));
} }
static int mt7915_poll_tx(struct napi_struct *napi, int budget) static int mt7915_poll_tx(struct napi_struct *napi, int budget)
...@@ -492,7 +494,8 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) ...@@ -492,7 +494,8 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) { if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
wa_rx_base = MT_WED_RX_RING_BASE; wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA; wa_rx_idx = MT7915_RXQ_MCU_WA;
dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE; mdev->q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
mdev->q_rx[MT_RXQ_MCU_WA].wed = &mdev->mmio.wed;
} else { } else {
wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA); wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA); wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
...@@ -507,9 +510,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) ...@@ -507,9 +510,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (!dev->phy.mt76->band_idx) { if (!dev->phy.mt76->band_idx) {
if (mtk_wed_device_active(&mdev->mmio.wed) && if (mtk_wed_device_active(&mdev->mmio.wed) &&
mtk_wed_get_rx_capa(&mdev->mmio.wed)) { mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
dev->mt76.q_rx[MT_RXQ_MAIN].flags = mdev->q_rx[MT_RXQ_MAIN].flags =
MT_WED_Q_RX(MT7915_RXQ_BAND0); MT_WED_Q_RX(MT7915_RXQ_BAND0);
dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
mdev->q_rx[MT_RXQ_MAIN].wed = &mdev->mmio.wed;
} }
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
...@@ -528,6 +532,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) ...@@ -528,6 +532,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (mtk_wed_device_active(&mdev->mmio.wed)) { if (mtk_wed_device_active(&mdev->mmio.wed)) {
mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE; mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
mdev->q_rx[MT_RXQ_MAIN_WA].wed = &mdev->mmio.wed;
if (is_mt7916(mdev)) { if (is_mt7916(mdev)) {
wa_rx_base = MT_WED_RX_RING_BASE; wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA; wa_rx_idx = MT7915_RXQ_MCU_WA;
...@@ -544,9 +549,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) ...@@ -544,9 +549,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (dev->dbdc_support || dev->phy.mt76->band_idx) { if (dev->dbdc_support || dev->phy.mt76->band_idx) {
if (mtk_wed_device_active(&mdev->mmio.wed) && if (mtk_wed_device_active(&mdev->mmio.wed) &&
mtk_wed_get_rx_capa(&mdev->mmio.wed)) { mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
dev->mt76.q_rx[MT_RXQ_BAND1].flags = mdev->q_rx[MT_RXQ_BAND1].flags =
MT_WED_Q_RX(MT7915_RXQ_BAND1); MT_WED_Q_RX(MT7915_RXQ_BAND1);
dev->mt76.rx_token_size += MT7915_RX_RING_SIZE; dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
mdev->q_rx[MT_RXQ_BAND1].wed = &mdev->mmio.wed;
} }
/* rx data queue for band1 */ /* rx data queue for band1 */
......
...@@ -171,7 +171,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev) ...@@ -171,7 +171,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev)
/* init tx queue */ /* init tx queue */
ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0, ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
MT7921_TX_RING_SIZE, MT7921_TX_RING_SIZE,
MT_TX_RING_BASE, 0); MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
......
...@@ -218,7 +218,7 @@ static int mt7925_dma_init(struct mt792x_dev *dev) ...@@ -218,7 +218,7 @@ static int mt7925_dma_init(struct mt792x_dev *dev)
/* init tx queue */ /* init tx queue */
ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7925_TXQ_BAND0, ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7925_TXQ_BAND0,
MT7925_TX_RING_SIZE, MT7925_TX_RING_SIZE,
MT_TX_RING_BASE, 0); MT_TX_RING_BASE, NULL, 0);
if (ret) if (ret)
return ret; return ret;
......
...@@ -268,7 +268,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) ...@@ -268,7 +268,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
ret = mt76_connac_init_tx_queues(dev->phy.mt76, ret = mt76_connac_init_tx_queues(dev->phy.mt76,
MT_TXQ_ID(dev->mphy.band_idx), MT_TXQ_ID(dev->mphy.band_idx),
MT7996_TX_RING_SIZE, MT7996_TX_RING_SIZE,
MT_TXQ_RING_BASE(0), 0); MT_TXQ_RING_BASE(0), NULL, 0);
if (ret) if (ret)
return ret; return ret;
......
...@@ -405,7 +405,8 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy, ...@@ -405,7 +405,8 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
ret = mt76_connac_init_tx_queues(phy->mt76, ret = mt76_connac_init_tx_queues(phy->mt76,
MT_TXQ_ID(band), MT_TXQ_ID(band),
MT7996_TX_RING_SIZE, MT7996_TX_RING_SIZE,
MT_TXQ_RING_BASE(band) + hif1_ofs, 0); MT_TXQ_RING_BASE(band) + hif1_ofs,
NULL, 0);
if (ret) if (ret)
goto error; goto error;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment