Commit cfced786 authored by Christoph Hellwig

dma-mapping: remove the default map_resource implementation

Instead, provide a proper implementation in the direct mapping code and
wire it up for arm and powerpc, leaving an error return for all the
IOMMU or virtual mapping instances for which an actual implementation
would still have to be wired up.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
parent f17b5f06
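
For context, here is a minimal caller-side sketch (not part of this commit; the device, FIFO address, and length are hypothetical placeholders): after this change, dma_map_resource() resolves to dma_direct_map_resource() for devices using the direct mapping, and returns DMA_MAPPING_ERROR when an IOMMU-backed dma_map_ops provides no ->map_resource method.

```c
/*
 * Hedged example (not from this commit): a driver mapping a device's
 * MMIO FIFO register, e.g. for DMA-engine slave transfers.
 * "fifo_phys" and "fifo_len" stand in for a real device's resource.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_fifo(struct device *dev, phys_addr_t fifo_phys,
			    size_t fifo_len, dma_addr_t *dma_out)
{
	dma_addr_t dma;

	/*
	 * Direct-mapped devices get the physical address back (after a
	 * range check against the device's DMA mask); dma_map_ops without
	 * ->map_resource now yield DMA_MAPPING_ERROR instead of silently
	 * passing the physical address through.
	 */
	dma = dma_map_resource(dev, fifo_phys, fifo_len, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, dma))
		return -EIO;

	*dma_out = dma;
	return 0;
}
```

The corresponding teardown would call dma_unmap_resource(dev, dma, fifo_len, DMA_TO_DEVICE, 0), which with this patch only invokes ops->unmap_resource for non-direct mappings.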
@@ -188,6 +188,7 @@ const struct dma_map_ops arm_dma_ops = {
 	.unmap_page = arm_dma_unmap_page,
 	.map_sg = arm_dma_map_sg,
 	.unmap_sg = arm_dma_unmap_sg,
+	.map_resource = dma_direct_map_resource,
 	.sync_single_for_cpu = arm_dma_sync_single_for_cpu,
 	.sync_single_for_device = arm_dma_sync_single_for_device,
 	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,

@@ -211,6 +212,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 	.get_sgtable = arm_dma_get_sgtable,
 	.map_page = arm_coherent_dma_map_page,
 	.map_sg = arm_dma_map_sg,
+	.map_resource = dma_direct_map_resource,
 	.dma_supported = arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);

@@ -55,6 +55,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
+	.map_resource = dma_direct_map_resource,
 	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
 	.sync_single_for_device = dma_direct_sync_single_for_device,
 	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,

@@ -273,6 +273,7 @@ const struct dma_map_ops dma_nommu_ops = {
 	.dma_supported = dma_nommu_dma_supported,
 	.map_page = dma_nommu_map_page,
 	.unmap_page = dma_nommu_unmap_page,
+	.map_resource = dma_direct_map_resource,
 	.get_required_mask = dma_nommu_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	.sync_single_for_cpu = dma_nommu_sync_single,

@@ -208,6 +208,8 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long attrs);
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_SWIOTLB)

@@ -346,19 +348,19 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
 		unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
+	dma_addr_t addr = DMA_MAPPING_ERROR;

 	BUG_ON(!valid_dma_direction(dir));

 	/* Don't allow RAM to be mapped */
 	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

-	addr = phys_addr;
-	if (ops && ops->map_resource)
+	if (dma_is_direct(ops))
+		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+	else if (ops->map_resource)
 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

 	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
 	return addr;
 }

@@ -369,7 +371,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);

 	BUG_ON(!valid_dma_direction(dir));
-	if (ops && ops->unmap_resource)
+	if (!dma_is_direct(ops) && ops->unmap_resource)
 		ops->unmap_resource(dev, addr, size, dir, attrs);
 	debug_dma_unmap_resource(dev, addr, size, dir);
 }

@@ -356,6 +356,20 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 }
 EXPORT_SYMBOL(dma_direct_map_sg);

+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_addr_t dma_addr = paddr;
+
+	if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
+		report_addr(dev, dma_addr, size);
+		return DMA_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+EXPORT_SYMBOL(dma_direct_map_resource);
+
 /*
  * Because 32-bit DMA masks are so common we expect every architecture to be
  * able to satisfy them - either by not supporting more physical memory, or by