Commit 5d23a1d2 authored by FUJITA Tomonori, committed by David S. Miller

net: replace dma_sync_single with dma_sync_single_for_cpu

This replaces dma_sync_single() with dma_sync_single_for_cpu() because
dma_sync_single() is an obsolete API; include/linux/dma-mapping.h says:

/* Backwards compat, remove in 2.7.x */
#define dma_sync_single		dma_sync_single_for_cpu
#define dma_sync_sg		dma_sync_sg_for_cpu
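
For reference (not part of this commit): the direction-specific calls are meant to be used in matched pairs around CPU access to a streaming mapping. A minimal sketch, with hypothetical dev, dma_addr, buf and len:

	/* illustrative only -- names are hypothetical placeholders */
	dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
	process_rx_buffer(buf, len);	/* CPU may safely read the buffer here */
	/* hand the buffer back to the device before it DMAs into it again */
	dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);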
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 108bfa89
@@ -253,7 +253,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 		skb = dev_alloc_skb(length + 2);
 		if (likely(skb != NULL)) {
 			skb_reserve(skb, 2);
-			dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
+			dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
 						length, DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
 			skb_put(skb, length);

@@ -331,7 +331,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 	ep->descs->tdesc[entry].tdesc1 =
 		TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-	dma_sync_single(NULL, ep->descs->tdesc[entry].buf_addr,
+	dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
 				skb->len, DMA_TO_DEVICE);
 	dev_kfree_skb(skb);

@@ -561,7 +561,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
 				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
 #else
-		dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
+		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
 				RX_BUFF_SIZE, DMA_FROM_DEVICE);
 		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
 			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);

@@ -402,7 +402,8 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	for (i = 0; i < npages; ++i)
 		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
-	dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				npages * sizeof (u64), DMA_TO_DEVICE);
 	return 0;
 }

@@ -549,7 +550,7 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
 	for (i = 0; i < npages; ++i)
 		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
-	dma_sync_single(&dev->pdev->dev, fmr->dma_handle,
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
 			npages * sizeof(u64), DMA_TO_DEVICE);
 	fmr->mpt->key = cpu_to_be32(key);

@@ -731,7 +731,7 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
 		dma_unmap_single(&dev->dev, desc->data,
 				 RX_SIZE, DMA_FROM_DEVICE);
 #else
-		dma_sync_single(&dev->dev, desc->data,
+		dma_sync_single_for_cpu(&dev->dev, desc->data,
 				RX_SIZE, DMA_FROM_DEVICE);
 		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
 			      ALIGN(desc->pkt_len, 4) / 4);