Commit 8c172467 authored by Alex Smith, committed by Ralf Baechle

MIPS: Add implementation of dma_map_ops.mmap()

The generic implementation of dma_map_ops.mmap(), dma_common_mmap(),
is not correct for non-coherent devices. It expects to be passed a
virtual address previously returned by dma_alloc_coherent(), which for
a non-coherent device will return a KSEG1 address. It then attempts to
convert that virtual address to a physical address using virt_to_page(),
which will yield an incorrect address.
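
As a reference for the change below, a minimal sketch of the lookup the new
code performs instead (the helper name dma_mmap_pfn() is hypothetical;
CAC_ADDR(), plat_device_is_coherent() and hw_coherentio are the existing MIPS
helpers used in the patch):

	/*
	 * Hypothetical helper: find the PFN to remap into user space for a
	 * buffer returned by dma_alloc_coherent().  For a non-coherent
	 * device the buffer address is an uncached KSEG1 alias, so convert
	 * it back to its cached counterpart before calling virt_to_page().
	 */
	static unsigned long dma_mmap_pfn(struct device *dev, void *cpu_addr)
	{
		unsigned long addr = (unsigned long)cpu_addr;

		if (!plat_device_is_coherent(dev) && !hw_coherentio)
			addr = CAC_ADDR(addr);

		return page_to_pfn(virt_to_page((void *)addr));
	}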

Also, dma_common_mmap() does not handle the DMA_ATTR_WRITE_COMBINE
attribute, and therefore dma_mmap_writecombine() will not actually set
the appropriate pgprot_t flags for write combining.
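
For context, a driver asking for a write-combined user mapping goes through
dma_mmap_writecombine(), which ends up in dma_map_ops.mmap(). A hypothetical
sketch (foo_ctx and its fields are made up for illustration):

	/*
	 * Hypothetical driver mmap handler.  dma_mmap_writecombine() passes
	 * DMA_ATTR_WRITE_COMBINE down to dma_map_ops.mmap(), so with this
	 * patch the user mapping really does get pgprot_writecombine().
	 */
	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_ctx *ctx = file->private_data;

		return dma_mmap_writecombine(ctx->dev, vma, ctx->cpu_addr,
					     ctx->dma_handle, ctx->size);
	}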

This patch adds an implementation of dma_map_ops.mmap() that correctly
handles KSEG1 addresses, and enables write combining when requested.
Signed-off-by: Alex Smith <alex.smith@imgtec.com>
Cc: Sadegh Abbasi <Sadegh.Abbasi@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10808/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent c4687b15
@@ -194,6 +194,40 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size,
+	struct dma_attrs *attrs)
+{
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long addr = (unsigned long)cpu_addr;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+	int ret = -ENXIO;
+
+	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+		addr = CAC_ADDR(addr);
+
+	pfn = page_to_pfn(virt_to_page((void *)addr));
+
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -380,6 +414,7 @@ EXPORT_SYMBOL(dma_cache_sync);
 static struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
+	.mmap = mips_dma_mmap,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,