Commit 9ab90179 authored by Christophe JAILLET, committed by David S. Miller

chelsio: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.

When memory is allocated in 'alloc_rx_resources()' and
'alloc_tx_resources()' (sge.c) GFP_KERNEL can be used because it is
already used in these functions.

Moreover, they can only be called from a .ndo_open function. So they are
guarded by the 'rtnl_lock()', which is a mutex.

While at it, a pr_err message in 'init_one()' has been updated accordingly
(s/consistent/coherent).

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f6d89dc5
...@@ -997,17 +997,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -997,17 +997,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev; goto out_disable_pdev;
} }
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1; pci_using_dac = 1;
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pr_err("%s: unable to obtain 64-bit DMA for " pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
"consistent allocations\n", pci_name(pdev)); pci_name(pdev));
err = -ENODEV; err = -ENODEV;
goto out_disable_pdev; goto out_disable_pdev;
} }
} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev)); pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev; goto out_disable_pdev;
} }
......
...@@ -509,9 +509,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) ...@@ -509,9 +509,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) { while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx]; struct freelQ_ce *ce = &q->centries[cidx];
pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len), dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
PCI_DMA_FROMDEVICE);
dev_kfree_skb(ce->skb); dev_kfree_skb(ce->skb);
ce->skb = NULL; ce->skb = NULL;
if (++cidx == q->size) if (++cidx == q->size)
...@@ -529,8 +528,8 @@ static void free_rx_resources(struct sge *sge) ...@@ -529,8 +528,8 @@ static void free_rx_resources(struct sge *sge)
if (sge->respQ.entries) { if (sge->respQ.entries) {
size = sizeof(struct respQ_e) * sge->respQ.size; size = sizeof(struct respQ_e) * sge->respQ.size;
pci_free_consistent(pdev, size, sge->respQ.entries, dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
sge->respQ.dma_addr); sge->respQ.dma_addr);
} }
for (i = 0; i < SGE_FREELQ_N; i++) { for (i = 0; i < SGE_FREELQ_N; i++) {
...@@ -542,8 +541,8 @@ static void free_rx_resources(struct sge *sge) ...@@ -542,8 +541,8 @@ static void free_rx_resources(struct sge *sge)
} }
if (q->entries) { if (q->entries) {
size = sizeof(struct freelQ_e) * q->size; size = sizeof(struct freelQ_e) * q->size;
pci_free_consistent(pdev, size, q->entries, dma_free_coherent(&pdev->dev, size, q->entries,
q->dma_addr); q->dma_addr);
} }
} }
} }
...@@ -564,7 +563,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p) ...@@ -564,7 +563,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
q->size = p->freelQ_size[i]; q->size = p->freelQ_size[i];
q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
size = sizeof(struct freelQ_e) * q->size; size = sizeof(struct freelQ_e) * q->size;
q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); q->entries = dma_alloc_coherent(&pdev->dev, size,
&q->dma_addr, GFP_KERNEL);
if (!q->entries) if (!q->entries)
goto err_no_mem; goto err_no_mem;
...@@ -601,7 +601,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p) ...@@ -601,7 +601,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
sge->respQ.credits = 0; sge->respQ.credits = 0;
size = sizeof(struct respQ_e) * sge->respQ.size; size = sizeof(struct respQ_e) * sge->respQ.size;
sge->respQ.entries = sge->respQ.entries =
pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
GFP_KERNEL);
if (!sge->respQ.entries) if (!sge->respQ.entries)
goto err_no_mem; goto err_no_mem;
return 0; return 0;
...@@ -624,9 +625,10 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) ...@@ -624,9 +625,10 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
ce = &q->centries[cidx]; ce = &q->centries[cidx];
while (n--) { while (n--) {
if (likely(dma_unmap_len(ce, dma_len))) { if (likely(dma_unmap_len(ce, dma_len))) {
pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_single(&pdev->dev,
dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len), dma_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
if (q->sop) if (q->sop)
q->sop = 0; q->sop = 0;
} }
...@@ -663,8 +665,8 @@ static void free_tx_resources(struct sge *sge) ...@@ -663,8 +665,8 @@ static void free_tx_resources(struct sge *sge)
} }
if (q->entries) { if (q->entries) {
size = sizeof(struct cmdQ_e) * q->size; size = sizeof(struct cmdQ_e) * q->size;
pci_free_consistent(pdev, size, q->entries, dma_free_coherent(&pdev->dev, size, q->entries,
q->dma_addr); q->dma_addr);
} }
} }
} }
...@@ -689,7 +691,8 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p) ...@@ -689,7 +691,8 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
q->stop_thres = 0; q->stop_thres = 0;
spin_lock_init(&q->lock); spin_lock_init(&q->lock);
size = sizeof(struct cmdQ_e) * q->size; size = sizeof(struct cmdQ_e) * q->size;
q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); q->entries = dma_alloc_coherent(&pdev->dev, size,
&q->dma_addr, GFP_KERNEL);
if (!q->entries) if (!q->entries)
goto err_no_mem; goto err_no_mem;
...@@ -837,8 +840,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q) ...@@ -837,8 +840,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
break; break;
skb_reserve(skb, q->dma_offset); skb_reserve(skb, q->dma_offset);
mapping = pci_map_single(pdev, skb->data, dma_len, mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
skb_reserve(skb, sge->rx_pkt_pad); skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb; ce->skb = skb;
...@@ -1049,15 +1052,15 @@ static inline struct sk_buff *get_packet(struct adapter *adapter, ...@@ -1049,15 +1052,15 @@ static inline struct sk_buff *get_packet(struct adapter *adapter,
goto use_orig_buf; goto use_orig_buf;
skb_put(skb, len); skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev, dma_sync_single_for_cpu(&pdev->dev,
dma_unmap_addr(ce, dma_addr), dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len), dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len); skb_copy_from_linear_data(ce->skb, skb->data, len);
pci_dma_sync_single_for_device(pdev, dma_sync_single_for_device(&pdev->dev,
dma_unmap_addr(ce, dma_addr), dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len), dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
recycle_fl_buf(fl, fl->cidx); recycle_fl_buf(fl, fl->cidx);
return skb; return skb;
} }
...@@ -1068,8 +1071,8 @@ static inline struct sk_buff *get_packet(struct adapter *adapter, ...@@ -1068,8 +1071,8 @@ static inline struct sk_buff *get_packet(struct adapter *adapter,
return NULL; return NULL;
} }
pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
skb = ce->skb; skb = ce->skb;
prefetch(skb->data); prefetch(skb->data);
...@@ -1091,8 +1094,9 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) ...@@ -1091,8 +1094,9 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx]; struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb; struct sk_buff *skb = ce->skb;
pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr), dma_sync_single_for_cpu(&adapter->pdev->dev,
dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n", pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data); adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx); recycle_fl_buf(fl, fl->cidx);
...@@ -1209,8 +1213,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, ...@@ -1209,8 +1213,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e = e1 = &q->entries[pidx]; e = e1 = &q->entries[pidx];
ce = &q->centries[pidx]; ce = &q->centries[pidx];
mapping = pci_map_single(adapter->pdev, skb->data, mapping = dma_map_single(&adapter->pdev->dev, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE); skb_headlen(skb), DMA_TO_DEVICE);
desc_mapping = mapping; desc_mapping = mapping;
desc_len = skb_headlen(skb); desc_len = skb_headlen(skb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment