Commit 53e8cdeb authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: replace vio_dma_mapping_error with dma_mapping_error everywhere.

From: Stephen Rothwell <sfr@canb.auug.org.au>

James Bottomley is right; this was a mistake.  This patch replaces
vio_dma_mapping_error with dma_mapping_error everywhere.
parent 8277a1fa
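For context, the conversion keeps the map-then-check pattern ibmveth already uses; only the error check changes from the vio_dma_mapping_error() wrapper to the generic dma_mapping_error(). Below is a minimal sketch of that pattern, not taken from the patch, assuming the single-argument dma_mapping_error() of this kernel generation and a hypothetical example_map_list() helper:

/*
 * Hypothetical helper (not part of the patch) illustrating the calling
 * convention after the change: map through the VIO layer, then check the
 * returned handle with the generic dma_mapping_error().
 */
static int example_map_list(struct ibmveth_adapter *adapter, void *list_addr)
{
	dma_addr_t handle;

	handle = vio_map_single(adapter->vdev, list_addr, 4096,
				DMA_BIDIRECTIONAL);

	/* Generic check replaces the old vio_dma_mapping_error() wrapper. */
	if (dma_mapping_error(handle))
		return -ENOMEM;

	adapter->buffer_list_dma = handle;
	return 0;
}

Since the wrapper merely forwarded to dma_mapping_error(), the vio.h hunk at the end of the diff can drop it outright without changing behaviour.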
@@ -404,7 +404,7 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
 	if(adapter->buffer_list_addr != NULL) {
-		if(!vio_dma_mapping_error(adapter->buffer_list_dma)) {
+		if(!dma_mapping_error(adapter->buffer_list_dma)) {
 			vio_unmap_single(adapter->vdev, adapter->buffer_list_dma, 4096, DMA_BIDIRECTIONAL);
 			adapter->buffer_list_dma = DMA_ERROR_CODE;
 		}
@@ -413,7 +413,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 	if(adapter->filter_list_addr != NULL) {
-		if(!vio_dma_mapping_error(adapter->filter_list_dma)) {
+		if(!dma_mapping_error(adapter->filter_list_dma)) {
 			vio_unmap_single(adapter->vdev, adapter->filter_list_dma, 4096, DMA_BIDIRECTIONAL);
 			adapter->filter_list_dma = DMA_ERROR_CODE;
 		}
@@ -422,7 +422,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 	if(adapter->rx_queue.queue_addr != NULL) {
-		if(!vio_dma_mapping_error(adapter->rx_queue.queue_dma)) {
+		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
 			vio_unmap_single(adapter->vdev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 		}
@@ -473,9 +473,9 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->filter_list_dma = vio_map_single(adapter->vdev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
 	adapter->rx_queue.queue_dma = vio_map_single(adapter->vdev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
-	if((vio_dma_mapping_error(adapter->buffer_list_dma) ) ||
-	   (vio_dma_mapping_error(adapter->filter_list_dma)) ||
-	   (vio_dma_mapping_error(adapter->rx_queue.queue_dma))) {
+	if((dma_mapping_error(adapter->buffer_list_dma) ) ||
+	   (dma_mapping_error(adapter->filter_list_dma)) ||
+	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
 		ibmveth_error_printk("unable to map filter or buffer list pages\n");
 		ibmveth_cleanup(adapter);
 		return -ENOMEM;
@@ -644,7 +644,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	desc[0].fields.address = vio_map_single(adapter->vdev, skb->data, desc[0].fields.length, PCI_DMA_TODEVICE);
 	desc[0].fields.valid = 1;
-	if(vio_dma_mapping_error(desc[0].fields.address)) {
+	if(dma_mapping_error(desc[0].fields.address)) {
 		ibmveth_error_printk("tx: unable to map initial fragment\n");
 		adapter->tx_map_failed++;
 		adapter->stats.tx_dropped++;
@@ -663,7 +663,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		desc[curfrag+1].fields.length = frag->size;
 		desc[curfrag+1].fields.valid = 1;
-		if(vio_dma_mapping_error(desc[curfrag+1].fields.address)) {
+		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
 			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
 			adapter->tx_map_failed++;
 			adapter->stats.tx_dropped++;
@@ -137,9 +137,4 @@ static inline struct vio_dev *to_vio_dev(struct device *dev)
 	return container_of(dev, struct vio_dev, dev);
 }
-static inline int vio_dma_mapping_error(dma_addr_t dma_addr)
-{
-	return dma_mapping_error(dma_addr);
-}
 #endif /* _ASM_VIO_H */