Commit 8680aa5a authored by Robin Murphy, committed by Joerg Roedel

iommu/dma: Don't remap CMA unnecessarily

Always remapping CMA allocations was largely a bodge to keep the freeing
logic manageable when it was split between here and an arch wrapper. Now
that it's all together and streamlined, we can relax that limitation.
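
In concrete terms (a condensed paraphrase of the hunks below; the
surrounding page allocation and IOMMU mapping are elided), the CMA
path of iommu_dma_alloc() now remaps only when it has to:

	/*
	 * Sketch of the post-patch logic, condensed from the diff below.
	 * Coherent lowmem CMA pages already have a usable kernel address,
	 * so only non-coherent devices or highmem pages take the vmap path.
	 */
	if (!coherent || PageHighMem(page)) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
				__builtin_return_address(0));
		if (!addr)
			goto out_unmap;

		if (!coherent)
			arch_dma_prep_coherent(page, iosize);
	} else {
		addr = page_address(page);
	}
	memset(addr, 0, size);

The net effect is that coherent devices allocating from lowmem CMA skip
dma_common_contiguous_remap() entirely and use the linear-map address
returned by page_address().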
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 072bebc0
@@ -973,7 +973,6 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 	size_t iosize = size;
 	struct page *page;
 	void *addr;
@@ -1021,6 +1020,9 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (*handle == DMA_MAPPING_ERROR)
 		goto out_free_pages;
 
+	if (!coherent || PageHighMem(page)) {
+		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+
 	addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
 			__builtin_return_address(0));
 	if (!addr)
@@ -1028,6 +1030,9 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (!coherent)
 		arch_dma_prep_coherent(page, iosize);
+	} else {
+		addr = page_address(page);
+	}
 
 	memset(addr, 0, size);
 	return addr;
 out_unmap: