Commit c4dae366 authored by Christoph Hellwig

swiotlb: refactor swiotlb_map_page

Remove the somewhat useless map_single function, and replace it with a
swiotlb_bounce_page handler that handles everything related to actually
bouncing a page.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 4803b44e
@@ -543,26 +543,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	return tlb_addr;
 }
 
-/*
- * Allocates bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir, unsigned long attrs)
-{
-	dma_addr_t start_dma_addr;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE) {
-		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
-				     &phys);
-		return SWIOTLB_MAP_ERROR;
-	}
-
-	start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
-	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
-				      dir, attrs);
-}
-
 /*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
@@ -714,6 +694,34 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
 	return true;
 }
 
+static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_addr_t dma_addr;
+
+	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
+		dev_warn_ratelimited(dev,
+			"Cannot do DMA to address %pa\n", phys);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	/* Oh well, have to allocate and map a bounce buffer. */
+	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+			*phys, size, dir, attrs);
+	if (*phys == SWIOTLB_MAP_ERROR)
+		return DIRECT_MAPPING_ERROR;
+
+	/* Ensure that the address returned is DMA'ble */
+	dma_addr = __phys_to_dma(dev, *phys);
+	if (unlikely(!dma_capable(dev, dma_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+			attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DIRECT_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.  The
  * physical address to use is returned.
@@ -726,7 +734,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    enum dma_data_direction dir,
 			    unsigned long attrs)
 {
-	phys_addr_t map, phys = page_to_phys(page) + offset;
+	phys_addr_t phys = page_to_phys(page) + offset;
 	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 
 	BUG_ON(dir == DMA_NONE);
@@ -739,22 +747,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 		return dev_addr;
 
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
-	/* Oh well, have to allocate and map a bounce buffer. */
-	map = map_single(dev, phys, size, dir, attrs);
-	if (map == SWIOTLB_MAP_ERROR)
-		return DIRECT_MAPPING_ERROR;
-
-	dev_addr = __phys_to_dma(dev, map);
-
-	/* Ensure that the address returned is DMA'ble */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-	return DIRECT_MAPPING_ERROR;
+	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
 }
 
 /*
...
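For context, a minimal, hypothetical driver-side sketch of how this path is exercised: drivers never call swiotlb_bounce_page() directly, they go through the generic DMA API, and a DIRECT_MAPPING_ERROR return from the bounce path surfaces as a mapping error. The example_map_page() function, its device pointer, and the buffer length are illustrative assumptions, not part of this commit.

#include <linux/dma-mapping.h>

/* Hypothetical caller: the swiotlb bounce fallback is transparent here. */
static int example_map_page(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t addr;

	/* May bounce through swiotlb_bounce_page() behind the scenes. */
	addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* bounce failed or address not DMA'ble */

	/* ... hand addr to the device ... */

	dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}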