Commit a4abe0ad authored by Christoph Hellwig's avatar Christoph Hellwig

xen-swiotlb: remove the mapping_error dma_map_ops method

Return DMA_MAPPING_ERROR instead of 0 on a dma mapping failure and let
the core dma-mapping code handle the rest.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cad34be7
@@ -53,8 +53,6 @@
  * API.
  */
-#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)
-
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 
 /*
@@ -406,7 +404,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
 				     attrs);
 	if (map == SWIOTLB_MAP_ERROR)
-		return XEN_SWIOTLB_ERROR_CODE;
+		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
@@ -421,7 +419,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-	return XEN_SWIOTLB_ERROR_CODE;
+	return DMA_MAPPING_ERROR;
 }
 
 /*
@@ -700,11 +698,6 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
 }
 
-static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
-}
-
 const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
@@ -719,5 +712,4 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.dma_supported = xen_swiotlb_dma_supported,
 	.mmap = xen_swiotlb_dma_mmap,
 	.get_sgtable = xen_swiotlb_get_sgtable,
-	.mapping_error = xen_swiotlb_mapping_error,
 };
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment