Commit 27d5c528 authored by Felix Fietkau

mt76: fix double DMA unmap of the first buffer on 7615/7915

A small part of the first skb buffer is passed to the firmware for parsing
via DMA, while the full buffer is passed as part of the TXP.

Avoid calling DMA unmap on that first part, since the unmap would use a different length than the one used for the map.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent c12b7c79
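
For context, here is a minimal userspace C sketch (hypothetical names, not mt76 driver code) of the pattern this patch introduces: the skb head is mapped once but referenced by two descriptors, a full-length copy carried in the TXP and a short prefix passed to the firmware for parsing. The cleanup path must skip the unmap for the prefix, otherwise the same DMA address is unmapped twice, the second time with a length that differs from the one used at map time.

/*
 * Minimal userspace model of the skip_unmap idea (hypothetical names,
 * not the mt76 API). One mapping of the skb head is referenced by two
 * queue buffers; only the entry that owns the mapping may release it,
 * and it must do so with the original mapping length.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue_buf {
	unsigned long addr;	/* fake DMA address */
	unsigned int len;	/* length used for this descriptor */
	bool skip_unmap;	/* mapping is owned by another descriptor */
};

struct mapping {
	unsigned long addr;
	unsigned int len;	/* length used at map time */
	bool mapped;
};

static struct mapping head_map = { .addr = 0x1000, .len = 256, .mapped = true };

static void fake_unmap(unsigned long addr, unsigned int len)
{
	if (!head_map.mapped || addr != head_map.addr || len != head_map.len) {
		printf("BUG: double unmap or length mismatch (addr=%#lx len=%u)\n",
		       addr, len);
		return;
	}
	head_map.mapped = false;
	printf("unmapped %#lx len=%u\n", addr, len);
}

int main(void)
{
	/*
	 * Two descriptors reference the same mapping of the skb head:
	 * the full buffer for the TXP, and a short prefix handed to the
	 * firmware parser. Only one of them may release the mapping.
	 */
	struct queue_buf bufs[2] = {
		{ .addr = head_map.addr, .len = head_map.len },
		{ .addr = head_map.addr, .len = 72, .skip_unmap = true },
	};

	for (int i = 0; i < 2; i++) {
		if (bufs[i].skip_unmap)	/* mapping owned elsewhere: skip */
			continue;
		fake_unmap(bufs[i].addr, bufs[i].len);
	}
	return 0;
}

In the driver itself the same idea is expressed by the new skip_unmap flag on struct mt76_queue_buf, which mt76_dma_add_buf() translates into the skip_buf0/skip_buf1 bits checked in mt76_dma_tx_cleanup_idx(), as shown in the diff below.
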
@@ -61,10 +61,16 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;
 
+		if (buf[0].skip_unmap)
+			q->entry[q->head].skip_buf0 = true;
+		q->entry[q->head].skip_buf1 = i == nbufs - 1;
+
 		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 		if (i < nbufs - 1) {
 			buf1 = buf[1].addr;
 			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+			if (buf[1].skip_unmap)
+				q->entry[q->head].skip_buf1 = true;
 		}
 
 		if (i == nbufs - 1)
@@ -107,7 +113,7 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 				 DMA_TO_DEVICE);
 	}
 
-	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
+	if (!e->skip_buf1) {
 		__le32 addr = READ_ONCE(q->desc[idx].buf1);
 		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
 
@@ -79,7 +79,8 @@ enum mt76_rxq_id {
 
 struct mt76_queue_buf {
 	dma_addr_t addr;
-	int len;
+	u16 len;
+	bool skip_unmap;
 };
 
 struct mt76_tx_info {
@@ -101,6 +102,7 @@ struct mt76_queue_entry {
 	};
 	enum mt76_txq_id qid;
 	bool skip_buf0:1;
+	bool skip_buf1:1;
 	bool schedule:1;
 	bool done:1;
 };
@@ -107,6 +107,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
 	/* pass partial skb header to fw */
 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
 	tx_info->buf[1].len = MT_CT_PARSE_LEN;
+	tx_info->buf[1].skip_unmap = true;
 	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
 
 	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
@@ -715,6 +715,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 
 	/* pass partial skb header to fw */
 	tx_info->buf[1].len = MT_CT_PARSE_LEN;
+	tx_info->buf[1].skip_unmap = true;
 	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
 
 	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);