Commit ebe9e651 authored by Christophe JAILLET, committed by Kalle Valo

intel: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.

It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.

It has been compile tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/f55043d0c847bfae60087707778563cf732a7bf9.1629619229.git.christophe.jaillet@wanadoo.fr
parent a847666a
...@@ -571,20 +571,18 @@ il3945_tx_skb(struct il_priv *il, ...@@ -571,20 +571,18 @@ il3945_tx_skb(struct il_priv *il,
/* Physical address of this Tx command's header (not MAC header!), /* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */ * within command buffer array. */
txcmd_phys = txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen,
pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, DMA_TO_DEVICE);
PCI_DMA_TODEVICE); if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys)))
if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
goto drop_unlock; goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb, /* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */ * if any (802.11 null frames have no payload). */
secondlen = skb->len - hdr_len; secondlen = skb->len - hdr_len;
if (secondlen > 0) { if (secondlen > 0) {
phys_addr = phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len,
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, secondlen, DMA_TO_DEVICE);
PCI_DMA_TODEVICE); if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr)))
if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
goto drop_unlock; goto drop_unlock;
} }
...@@ -1015,11 +1013,11 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) ...@@ -1015,11 +1013,11 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
/* Get physical address of RB/SKB */ /* Get physical address of RB/SKB */
page_dma = page_dma =
pci_map_page(il->pci_dev, page, 0, dma_map_page(&il->pci_dev->dev, page, 0,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) {
__free_pages(page, il->hw_params.rx_page_order); __free_pages(page, il->hw_params.rx_page_order);
break; break;
} }
...@@ -1028,9 +1026,9 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) ...@@ -1028,9 +1026,9 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
if (list_empty(&rxq->rx_used)) { if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags); spin_unlock_irqrestore(&rxq->lock, flags);
pci_unmap_page(il->pci_dev, page_dma, dma_unmap_page(&il->pci_dev->dev, page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
__free_pages(page, il->hw_params.rx_page_order); __free_pages(page, il->hw_params.rx_page_order);
return; return;
} }
...@@ -1062,9 +1060,10 @@ il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) ...@@ -1062,9 +1060,10 @@ il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
/* In the reset function, these buffers may have been allocated /* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */ * to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) { if (rxq->pool[i].page != NULL) {
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, dma_unmap_page(&il->pci_dev->dev,
rxq->pool[i].page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
__il_free_pages(il, rxq->pool[i].page); __il_free_pages(il, rxq->pool[i].page);
rxq->pool[i].page = NULL; rxq->pool[i].page = NULL;
} }
...@@ -1111,9 +1110,10 @@ il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) ...@@ -1111,9 +1110,10 @@ il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
int i; int i;
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].page != NULL) { if (rxq->pool[i].page != NULL) {
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, dma_unmap_page(&il->pci_dev->dev,
rxq->pool[i].page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
__il_free_pages(il, rxq->pool[i].page); __il_free_pages(il, rxq->pool[i].page);
rxq->pool[i].page = NULL; rxq->pool[i].page = NULL;
} }
...@@ -1213,9 +1213,9 @@ il3945_rx_handle(struct il_priv *il) ...@@ -1213,9 +1213,9 @@ il3945_rx_handle(struct il_priv *il)
rxq->queue[i] = NULL; rxq->queue[i] = NULL;
pci_unmap_page(il->pci_dev, rxb->page_dma, dma_unmap_page(&il->pci_dev->dev, rxb->page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
pkt = rxb_addr(rxb); pkt = rxb_addr(rxb);
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
...@@ -1260,11 +1260,11 @@ il3945_rx_handle(struct il_priv *il) ...@@ -1260,11 +1260,11 @@ il3945_rx_handle(struct il_priv *il)
spin_lock_irqsave(&rxq->lock, flags); spin_lock_irqsave(&rxq->lock, flags);
if (rxb->page != NULL) { if (rxb->page != NULL) {
rxb->page_dma = rxb->page_dma =
pci_map_page(il->pci_dev, rxb->page, 0, dma_map_page(&il->pci_dev->dev, rxb->page, 0,
PAGE_SIZE << il->hw_params. PAGE_SIZE << il->hw_params.rx_page_order,
rx_page_order, PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
if (unlikely(pci_dma_mapping_error(il->pci_dev, if (unlikely(dma_mapping_error(&il->pci_dev->dev,
rxb->page_dma))) { rxb->page_dma))) {
__il_free_pages(il, rxb->page); __il_free_pages(il, rxb->page);
rxb->page = NULL; rxb->page = NULL;
list_add_tail(&rxb->list, &rxq->rx_used); list_add_tail(&rxb->list, &rxq->rx_used);
...@@ -3616,9 +3616,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3616,9 +3616,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev); pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) { if (err) {
IL_WARN("No suitable DMA available.\n"); IL_WARN("No suitable DMA available.\n");
goto out_pci_disable_device; goto out_pci_disable_device;
......
...@@ -652,16 +652,16 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) ...@@ -652,16 +652,16 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
/* Unmap tx_cmd */ /* Unmap tx_cmd */
if (counter) if (counter)
pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), dma_unmap_single(&dev->dev,
dma_unmap_addr(&txq->meta[idx], mapping),
dma_unmap_len(&txq->meta[idx], len), dma_unmap_len(&txq->meta[idx], len),
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
/* unmap chunks if any */ /* unmap chunks if any */
for (i = 1; i < counter; i++) for (i = 1; i < counter; i++)
pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), dma_unmap_single(&dev->dev, le32_to_cpu(tfd->tbs[i].addr),
le32_to_cpu(tfd->tbs[i].len), le32_to_cpu(tfd->tbs[i].len), DMA_TO_DEVICE);
PCI_DMA_TODEVICE);
/* free SKB */ /* free SKB */
if (txq->skbs) { if (txq->skbs) {
......
...@@ -94,9 +94,10 @@ il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) ...@@ -94,9 +94,10 @@ il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
/* In the reset function, these buffers may have been allocated /* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */ * to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) { if (rxq->pool[i].page != NULL) {
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, dma_unmap_page(&il->pci_dev->dev,
rxq->pool[i].page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
__il_free_pages(il, rxq->pool[i].page); __il_free_pages(il, rxq->pool[i].page);
rxq->pool[i].page = NULL; rxq->pool[i].page = NULL;
} }
...@@ -342,11 +343,10 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) ...@@ -342,11 +343,10 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
} }
/* Get physical address of the RB */ /* Get physical address of the RB */
page_dma = page_dma = dma_map_page(&il->pci_dev->dev, page, 0,
pci_map_page(il->pci_dev, page, 0, PAGE_SIZE << il->hw_params.rx_page_order,
PAGE_SIZE << il->hw_params.rx_page_order, DMA_FROM_DEVICE);
PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) {
if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
__free_pages(page, il->hw_params.rx_page_order); __free_pages(page, il->hw_params.rx_page_order);
break; break;
} }
...@@ -355,9 +355,9 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) ...@@ -355,9 +355,9 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
if (list_empty(&rxq->rx_used)) { if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags); spin_unlock_irqrestore(&rxq->lock, flags);
pci_unmap_page(il->pci_dev, page_dma, dma_unmap_page(&il->pci_dev->dev, page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
__free_pages(page, il->hw_params.rx_page_order); __free_pages(page, il->hw_params.rx_page_order);
return; return;
} }
...@@ -409,9 +409,10 @@ il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) ...@@ -409,9 +409,10 @@ il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
int i; int i;
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].page != NULL) { if (rxq->pool[i].page != NULL) {
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, dma_unmap_page(&il->pci_dev->dev,
rxq->pool[i].page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
__il_free_pages(il, rxq->pool[i].page); __il_free_pages(il, rxq->pool[i].page);
rxq->pool[i].page = NULL; rxq->pool[i].page = NULL;
} }
...@@ -1815,20 +1816,18 @@ il4965_tx_skb(struct il_priv *il, ...@@ -1815,20 +1816,18 @@ il4965_tx_skb(struct il_priv *il,
/* Physical address of this Tx command's header (not MAC header!), /* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */ * within command buffer array. */
txcmd_phys = txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen,
pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, DMA_BIDIRECTIONAL);
PCI_DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys)))
if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
goto drop_unlock; goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb, /* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */ * if any (802.11 null frames have no payload). */
secondlen = skb->len - hdr_len; secondlen = skb->len - hdr_len;
if (secondlen > 0) { if (secondlen > 0) {
phys_addr = phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len,
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, secondlen, DMA_TO_DEVICE);
PCI_DMA_TODEVICE); if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr)))
if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
goto drop_unlock; goto drop_unlock;
} }
...@@ -1853,8 +1852,8 @@ il4965_tx_skb(struct il_priv *il, ...@@ -1853,8 +1852,8 @@ il4965_tx_skb(struct il_priv *il,
offsetof(struct il_tx_cmd, scratch); offsetof(struct il_tx_cmd, scratch);
/* take back ownership of DMA buffer to enable update */ /* take back ownership of DMA buffer to enable update */
pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen, dma_sync_single_for_cpu(&il->pci_dev->dev, txcmd_phys, firstlen,
PCI_DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
...@@ -1869,8 +1868,8 @@ il4965_tx_skb(struct il_priv *il, ...@@ -1869,8 +1868,8 @@ il4965_tx_skb(struct il_priv *il,
if (info->flags & IEEE80211_TX_CTL_AMPDU) if (info->flags & IEEE80211_TX_CTL_AMPDU)
il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len)); il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen, dma_sync_single_for_device(&il->pci_dev->dev, txcmd_phys, firstlen,
PCI_DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
/* Tell device the write idx *just past* this latest filled TFD */ /* Tell device the write idx *just past* this latest filled TFD */
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
...@@ -3929,15 +3928,15 @@ il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) ...@@ -3929,15 +3928,15 @@ il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
/* Unmap tx_cmd */ /* Unmap tx_cmd */
if (num_tbs) if (num_tbs)
pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), dma_unmap_single(&dev->dev,
dma_unmap_addr(&txq->meta[idx], mapping),
dma_unmap_len(&txq->meta[idx], len), dma_unmap_len(&txq->meta[idx], len),
PCI_DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
/* Unmap chunks, if any. */ /* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++) for (i = 1; i < num_tbs; i++)
pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i), dma_unmap_single(&dev->dev, il4965_tfd_tb_get_addr(tfd, i),
il4965_tfd_tb_get_len(tfd, i), il4965_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE);
PCI_DMA_TODEVICE);
/* free SKB */ /* free SKB */
if (txq->skbs) { if (txq->skbs) {
...@@ -4243,9 +4242,9 @@ il4965_rx_handle(struct il_priv *il) ...@@ -4243,9 +4242,9 @@ il4965_rx_handle(struct il_priv *il)
rxq->queue[i] = NULL; rxq->queue[i] = NULL;
pci_unmap_page(il->pci_dev, rxb->page_dma, dma_unmap_page(&il->pci_dev->dev, rxb->page_dma,
PAGE_SIZE << il->hw_params.rx_page_order, PAGE_SIZE << il->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
pkt = rxb_addr(rxb); pkt = rxb_addr(rxb);
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
...@@ -4290,12 +4289,12 @@ il4965_rx_handle(struct il_priv *il) ...@@ -4290,12 +4289,12 @@ il4965_rx_handle(struct il_priv *il)
spin_lock_irqsave(&rxq->lock, flags); spin_lock_irqsave(&rxq->lock, flags);
if (rxb->page != NULL) { if (rxb->page != NULL) {
rxb->page_dma = rxb->page_dma =
pci_map_page(il->pci_dev, rxb->page, 0, dma_map_page(&il->pci_dev->dev, rxb->page, 0,
PAGE_SIZE << il->hw_params. PAGE_SIZE << il->hw_params.rx_page_order,
rx_page_order, PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
if (unlikely(pci_dma_mapping_error(il->pci_dev, if (unlikely(dma_mapping_error(&il->pci_dev->dev,
rxb->page_dma))) { rxb->page_dma))) {
__il_free_pages(il, rxb->page); __il_free_pages(il, rxb->page);
rxb->page = NULL; rxb->page = NULL;
list_add_tail(&rxb->list, &rxq->rx_used); list_add_tail(&rxb->list, &rxq->rx_used);
...@@ -6514,14 +6513,9 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -6514,14 +6513,9 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev); pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
if (err) { if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
err =
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
/* both attempts failed: */ /* both attempts failed: */
if (err) { if (err) {
IL_WARN("No suitable DMA available.\n"); IL_WARN("No suitable DMA available.\n");
......
...@@ -2819,10 +2819,10 @@ il_cmd_queue_unmap(struct il_priv *il) ...@@ -2819,10 +2819,10 @@ il_cmd_queue_unmap(struct il_priv *il)
i = il_get_cmd_idx(q, q->read_ptr, 0); i = il_get_cmd_idx(q, q->read_ptr, 0);
if (txq->meta[i].flags & CMD_MAPPED) { if (txq->meta[i].flags & CMD_MAPPED) {
pci_unmap_single(il->pci_dev, dma_unmap_single(&il->pci_dev->dev,
dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_addr(&txq->meta[i], mapping),
dma_unmap_len(&txq->meta[i], len), dma_unmap_len(&txq->meta[i], len),
PCI_DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
txq->meta[i].flags = 0; txq->meta[i].flags = 0;
} }
...@@ -2831,10 +2831,10 @@ il_cmd_queue_unmap(struct il_priv *il) ...@@ -2831,10 +2831,10 @@ il_cmd_queue_unmap(struct il_priv *il)
i = q->n_win; i = q->n_win;
if (txq->meta[i].flags & CMD_MAPPED) { if (txq->meta[i].flags & CMD_MAPPED) {
pci_unmap_single(il->pci_dev, dma_unmap_single(&il->pci_dev->dev,
dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_addr(&txq->meta[i], mapping),
dma_unmap_len(&txq->meta[i], len), dma_unmap_len(&txq->meta[i], len),
PCI_DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
txq->meta[i].flags = 0; txq->meta[i].flags = 0;
} }
} }
...@@ -3197,10 +3197,9 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) ...@@ -3197,10 +3197,9 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
} }
#endif #endif
phys_addr = phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size,
pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, DMA_BIDIRECTIONAL);
PCI_DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) {
if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
idx = -ENOMEM; idx = -ENOMEM;
goto out; goto out;
} }
...@@ -3298,8 +3297,8 @@ il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) ...@@ -3298,8 +3297,8 @@ il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
txq->time_stamp = jiffies; txq->time_stamp = jiffies;
pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); dma_unmap_len(meta, len), DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */ /* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) { if (meta->flags & CMD_WANT_SKB) {
......
...@@ -3506,15 +3506,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, ...@@ -3506,15 +3506,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev); pci_set_master(pdev);
addr_size = trans->txqs.tfd.addr_size; addr_size = trans->txqs.tfd.addr_size;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(addr_size));
if (ret) { if (ret) {
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */ /* both attempts failed: */
if (ret) { if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n"); dev_err(&pdev->dev, "No suitable DMA available\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment