Commit a4a4330d authored by Christoph Hellwig

swiotlb: add support for non-coherent DMA

Handle architectures that are not cache coherent directly in the main
swiotlb code by calling arch_sync_dma_for_{device,cpu} in all the right
places from the various dma_map/unmap/sync methods when the device is
non-coherent.

Because swiotlb now uses dma_direct_alloc for the coherent allocation,
that side is already taken care of by the dma-direct code calling into
arch_dma_{alloc,free} for devices that are non-coherent.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent fafadcd1
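
For context, the map-side pattern this patch applies can be sketched in plain C. This is an illustrative condensation, not the kernel code itself: map_page_sketch is a hypothetical name, the page/offset arithmetic of the real swiotlb_map_page() is dropped, and the failure path of swiotlb_bounce_page() is not handled.

/*
 * Illustrative sketch only. After the bounce decision, phys refers to
 * the buffer the device will actually access, so the arch cache
 * maintenance covers the right memory.
 */
dma_addr_t map_page_sketch(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	/* Bounce if the device cannot address the buffer directly. */
	if (!dma_capable(dev, dev_addr, size) ||
	    swiotlb_force == SWIOTLB_FORCE)
		dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);

	/*
	 * Non-coherent device: write the CPU caches back over the buffer
	 * the device will see; when bouncing occurred, phys was updated
	 * to point at the bounce buffer.
	 */
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);

	return dev_addr;
}

The ordering is the point of the patch: the arch sync must run after the bounce decision so that the cache maintenance hits the bounce buffer rather than the original page whenever a bounce happened.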
@@ -21,6 +21,7 @@
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
-		return dev_addr;
+	if (!dma_capable(dev, dev_addr, size) ||
+	    swiotlb_force == SWIOTLB_FORCE) {
+		trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+		dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	}
 
-	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	if (!dev_is_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_device(dev, phys, size, dir);
+
+	return dev_addr;
 }
 
 /*
@@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (!dev_is_dma_coherent(hwdev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
 	if (is_swiotlb_buffer(paddr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
@@ -730,14 +741,16 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(paddr)) {
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
+	if (is_swiotlb_buffer(paddr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
 
-	if (dir != DMA_FROM_DEVICE)
-		return;
-
-	dma_mark_clean(phys_to_virt(paddr), size);
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
+		arch_sync_dma_for_device(hwdev, paddr, size, dir);
+
+	if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
+		dma_mark_clean(phys_to_virt(paddr), size);
 }
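
For readability, here is the resulting swiotlb_sync_single() flattened out of the hunk above; the paddr initialization is carried over from unchanged context the diff does not show, so treat that line as an assumption. The ordering is deliberate: for SYNC_FOR_CPU the arch cache maintenance runs before the bounce copy, so the CPU-side memcpy reads device-written data rather than stale cache lines; for SYNC_FOR_DEVICE it runs after the copy, so the freshly filled bounce buffer is pushed out of the CPU caches before the device reads it.

static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* Invalidate before copying data out of the bounce buffer. */
	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);

	if (is_swiotlb_buffer(paddr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	/* Write back after copying data into the bounce buffer. */
	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
		arch_sync_dma_for_device(hwdev, paddr, size, dir);

	if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
		dma_mark_clean(phys_to_virt(paddr), size);
}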