Commit d1ddc536 authored by Felix Fietkau

mt76: add support for overriding the device used for DMA mapping

WED support requires using non-coherent DMA, whereas the PCI device might
be configured for coherent DMA.
The WED driver will take care of changing the PCI HIF coherent I/O setting on attach.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent cc9fd945
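For context, the new mt76_dev::dma_dev pointer lets bus glue point every DMA mapping, sync, and coherent allocation at a device other than the one passed to mt76_alloc_device(). A minimal sketch of that override pattern, assuming a hypothetical helper name and wed_dev argument (neither appears in this commit):

#include "mt76.h"

/*
 * Illustrative only: mt76_alloc_device() defaults dma_dev to the same
 * struct device that was passed in as pdev.  A WED/bus attach path could
 * later substitute a different device; every dma_map_single(),
 * dma_sync_single_for_*() and dmam_alloc_coherent() call in dma.c then
 * goes through that device's DMA ops (e.g. non-coherent mappings).
 */
static void example_override_dma_dev(struct mt76_dev *mdev,
				     struct device *wed_dev)
{
	if (wed_dev)
		mdev->dma_dev = wed_dev;
}

Everything else keeps using dev->dev; only the mapping, sync, and coherent-allocation calls shown in the diff below switch to dma_dev.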
@@ -24,7 +24,7 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	if (!txwi)
 		return NULL;
-	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
 			      DMA_TO_DEVICE);
 	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
 	t->dma_addr = addr;
@@ -78,7 +78,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 	local_bh_disable();
 	while ((t = __mt76_get_txwi(dev)) != NULL) {
-		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				 DMA_TO_DEVICE);
 		kfree(mt76_get_txwi_ptr(dev, t));
 	}
@@ -127,7 +127,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	q->hw_idx = idx;
 	size = q->ndesc * sizeof(struct mt76_desc);
-	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
 	if (!q->desc)
 		return -ENOMEM;
@@ -209,11 +209,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	struct mt76_queue_entry *e = &q->entry[idx];
 	if (!e->skip_buf0)
-		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
 				 DMA_TO_DEVICE);
 	if (!e->skip_buf1)
-		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
+		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
 				 DMA_TO_DEVICE);
 	if (e->txwi == DMA_DUMMY_DATA)
@@ -293,7 +293,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (info)
 		*info = le32_to_cpu(desc->info);
-	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
 	e->buf = NULL;
 	return buf;
@@ -330,9 +330,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	if (q->queued + 1 >= q->ndesc - 1)
 		goto error;
-	addr = dma_map_single(dev->dev, skb->data, skb->len,
+	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
 			      DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev, addr)))
+	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 		goto error;
 	buf.addr = addr;
@@ -379,8 +379,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	mt76_insert_hdr_pad(skb);
 	len = skb_headlen(skb);
-	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev, addr)))
+	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 		goto free;
 	tx_info.buf[n].addr = t->dma_addr;
@@ -392,9 +392,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		if (n == ARRAY_SIZE(tx_info.buf))
 			goto unmap;
-		addr = dma_map_single(dev->dev, iter->data, iter->len,
+		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
 				      DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev, addr)))
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;
 		tx_info.buf[n].addr = addr;
@@ -407,10 +407,10 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		goto unmap;
 	}
-	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				DMA_TO_DEVICE);
 	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
-	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				   DMA_TO_DEVICE);
 	if (ret < 0)
 		goto unmap;
@@ -420,7 +420,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 unmap:
 	for (n--; n > 0; n--)
-		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
 				 tx_info.buf[n].len, DMA_TO_DEVICE);
 free:
@@ -465,8 +465,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!buf)
 			break;
-		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev, addr))) {
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
 			skb_free_frag(buf);
 			break;
 		}
...
@@ -545,6 +545,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	dev->hw = hw;
 	dev->dev = pdev;
 	dev->drv = drv_ops;
+	dev->dma_dev = pdev;
 	phy = &dev->phy;
 	phy->dev = dev;
...
@@ -698,6 +698,7 @@ struct mt76_dev {
 	const struct mt76_driver_ops *drv;
 	const struct mt76_mcu_ops *mcu_ops;
 	struct device *dev;
+	struct device *dma_dev;
 	struct mt76_mcu mcu;
...