Commit dcc82bb0 authored by Christophe JAILLET, committed by David S. Miller

net: sun: cassini: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.
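
For context, the wrappers being removed are thin inline shims. The sketch below
shows their general shape (simplified from include/linux/pci-dma-compat.h, not
a verbatim copy); note that pci_alloc_consistent() hard-codes GFP_ATOMIC, which
is why the script can only emit a "GFP_" placeholder and the correct flag must
be picked by hand:

	/* Simplified sketch of two of the retiring shims (not verbatim). */
	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		/* Always GFP_ATOMIC, even where the caller may sleep. */
		return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
					  GFP_ATOMIC);
	}

	static inline dma_addr_t
	pci_map_page(struct pci_dev *hwdev, struct page *page,
		     unsigned long offset, size_t size, int direction)
	{
		return dma_map_page(&hwdev->dev, page, offset, size,
				    (enum dma_data_direction)direction);
	}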

When memory is allocated in 'cas_tx_tiny_alloc()', GFP_KERNEL can be used:
a few lines below in its only caller, 'cas_alloc_rxds()' is also called, and
that function makes explicit use of GFP_KERNEL.

When memory is allocated in 'cas_init_one()', GFP_KERNEL can be used
because it is a probe function and no lock is acquired.
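
To illustrate the first point, here is a condensed sketch of the caller (shape
assumed and abridged, not verbatim driver code): the only caller of
'cas_tx_tiny_alloc()' also calls 'cas_alloc_rxds()' a few lines below, which
already allocates with GFP_KERNEL, so this context is known to allow sleeping.

	/* Abridged sketch of the assumed caller shape (not verbatim). */
	static int cas_alloc_rings(struct cas *cp)
	{
		if (cas_tx_tiny_alloc(cp) < 0)	/* now uses GFP_KERNEL */
			return -ENOMEM;
		if (cas_alloc_rxds(cp) < 0) {	/* already used GFP_KERNEL */
			cas_tx_tiny_free(cp);
			return -ENOMEM;
		}
		return 0;
	}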

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8c728940
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -443,8 +443,8 @@ static void cas_phy_powerdown(struct cas *cp)
 /* cp->lock held. note: the last put_page will free the buffer */
 static int cas_page_free(struct cas *cp, cas_page_t *page)
 {
-	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
-		       PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
+		       DMA_FROM_DEVICE);
 	__free_pages(page->buffer, cp->page_order);
 	kfree(page);
 	return 0;
@@ -474,8 +474,8 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 	page->buffer = alloc_pages(flags, cp->page_order);
 	if (!page->buffer)
 		goto page_err;
-	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
-				      cp->page_size, PCI_DMA_FROMDEVICE);
+	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
+				      cp->page_size, DMA_FROM_DEVICE);
 	return page;
 
 page_err:
@@ -1863,8 +1863,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
 			daddr = le64_to_cpu(txd->buffer);
 			dlen = CAS_VAL(TX_DESC_BUFLEN,
 				       le64_to_cpu(txd->control));
-			pci_unmap_page(cp->pdev, daddr, dlen,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+				       DMA_TO_DEVICE);
 			entry = TX_DESC_NEXT(ring, entry);
 
 			/* tiny buffer may follow */
@@ -1957,12 +1957,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (!dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 		addr = cas_page_map(page->buffer);
 		memcpy(p, addr + off, i);
-		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&cp->pdev->dev,
+					   page->dma_addr + off, i,
+					   DMA_FROM_DEVICE);
 		cas_page_unmap(addr);
 		RX_USED_ADD(page, 0x100);
 		p += hlen;
@@ -1988,16 +1989,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (i == dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 
 		/* make sure we always copy a header */
 		swivel = 0;
 		if (p == (char *) skb->data) { /* not split */
 			addr = cas_page_map(page->buffer);
 			memcpy(p, addr + off, RX_COPY_MIN);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr + off, i,
+						   DMA_FROM_DEVICE);
 			cas_page_unmap(addr);
 			off += RX_COPY_MIN;
 			swivel = RX_COPY_MIN;
@@ -2024,12 +2026,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-						    hlen + cp->crc_size,
-						    PCI_DMA_FROMDEVICE);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-						       hlen + cp->crc_size,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&cp->pdev->dev,
+						page->dma_addr,
+						hlen + cp->crc_size,
+						DMA_FROM_DEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr,
+						   hlen + cp->crc_size,
+						   DMA_FROM_DEVICE);
 
 			skb_shinfo(skb)->nr_frags++;
 			skb->data_len += hlen;
@@ -2066,12 +2070,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (i == dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 		addr = cas_page_map(page->buffer);
 		memcpy(p, addr + off, i);
-		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&cp->pdev->dev,
+					   page->dma_addr + off, i,
+					   DMA_FROM_DEVICE);
 		cas_page_unmap(addr);
 		if (p == (char *) skb->data) /* not split */
 			RX_USED_ADD(page, cp->mtu_stride);
@@ -2083,14 +2088,16 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 			p += hlen;
 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-						    dlen + cp->crc_size,
-						    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&cp->pdev->dev,
+						page->dma_addr,
+						dlen + cp->crc_size,
+						DMA_FROM_DEVICE);
 			addr = cas_page_map(page->buffer);
 			memcpy(p, addr, dlen + cp->crc_size);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-						       dlen + cp->crc_size,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr,
+						   dlen + cp->crc_size,
+						   DMA_FROM_DEVICE);
 			cas_page_unmap(addr);
 			RX_USED_ADD(page, dlen + cp->crc_size);
 		}
@@ -2766,9 +2773,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	len = skb_headlen(skb);
-	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
-			       offset_in_page(skb->data), len,
-			       PCI_DMA_TODEVICE);
+	mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), len, DMA_TO_DEVICE);
 
 	tentry = entry;
 	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
@@ -3882,8 +3888,8 @@ static void cas_clean_txd(struct cas *cp, int ring)
 			daddr = le64_to_cpu(txd[ent].buffer);
 			dlen = CAS_VAL(TX_DESC_BUFLEN,
 				       le64_to_cpu(txd[ent].control));
-			pci_unmap_page(cp->pdev, daddr, dlen,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+				       DMA_TO_DEVICE);
 
 			if (frag != skb_shinfo(skb)->nr_frags) {
 				i++;
@@ -4181,9 +4187,8 @@ static void cas_tx_tiny_free(struct cas *cp)
 		if (!cp->tx_tiny_bufs[i])
 			continue;
 
-		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
-				    cp->tx_tiny_bufs[i],
-				    cp->tx_tiny_dvma[i]);
+		dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+				  cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
 		cp->tx_tiny_bufs[i] = NULL;
 	}
 }
@@ -4195,8 +4200,8 @@ static int cas_tx_tiny_alloc(struct cas *cp)
 	for (i = 0; i < N_TX_RINGS; i++) {
 		cp->tx_tiny_bufs[i] =
-			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
-					     &cp->tx_tiny_dvma[i]);
+			dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+					   &cp->tx_tiny_dvma[i], GFP_KERNEL);
 		if (!cp->tx_tiny_bufs[i]) {
 			cas_tx_tiny_free(cp);
 			return -1;
@@ -4958,10 +4963,9 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Configure DMA attributes. */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pdev,
-						  DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (err < 0) {
 			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
 				"for consistent allocations\n");
@@ -4969,7 +4973,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "No usable DMA configuration, "
 				"aborting\n");
@@ -5048,8 +5052,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	cas_saturn_firmware_init(cp);
 
 	cp->init_block =
-		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
-				     &cp->block_dvma);
+		dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
+				   &cp->block_dvma, GFP_KERNEL);
 	if (!cp->init_block) {
 		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
 		goto err_out_iounmap;
@@ -5109,8 +5113,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 err_out_free_consistent:
-	pci_free_consistent(pdev, sizeof(struct cas_init_block),
-			    cp->init_block, cp->block_dvma);
+	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+			  cp->init_block, cp->block_dvma);
 
 err_out_iounmap:
 	mutex_lock(&cp->pm_mutex);
@@ -5164,8 +5168,8 @@ static void cas_remove_one(struct pci_dev *pdev)
 				      cp->orig_cacheline_size);
 	}
 #endif
-	pci_free_consistent(pdev, sizeof(struct cas_init_block),
-			    cp->init_block, cp->block_dvma);
+	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+			  cp->init_block, cp->block_dvma);
 	pci_iounmap(pdev, cp->regs);
 	free_netdev(dev);
 	pci_release_regions(pdev);