Commit 2c1de929 authored by Christoph Hellwig, committed by Stafford Horne

openrisc: fix cache maintenance in the sync_single_for_device DMA operation

The cache maintenance in the sync_single_for_device operation should be
equivalent to that in the map_page operation, so that buffers can be reused.
Fix the openrisc implementation by moving the cache maintenance performed in
map_page into the sync_single method, and calling that from map_page.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Stafford Horne <shorne@gmail.com>
parent dcc9c919
@@ -133,19 +133,15 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages_exact(vaddr, size);
 }
 
-static dma_addr_t
-or1k_map_page(struct device *dev, struct page *page,
-	      unsigned long offset, size_t size,
-	      enum dma_data_direction dir,
-	      unsigned long attrs)
+static void
+or1k_sync_single_for_device(struct device *dev,
+			    dma_addr_t dma_handle, size_t size,
+			    enum dma_data_direction dir)
 {
 	unsigned long cl;
-	dma_addr_t addr = page_to_phys(page) + offset;
+	dma_addr_t addr = dma_handle;
 	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return addr;
-
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		/* Flush the dcache for the requested range */
@@ -168,6 +164,20 @@ or1k_map_page(struct device *dev, struct page *page,
 		break;
 	}
+}
+
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+	      unsigned long offset, size_t size,
+	      enum dma_data_direction dir,
+	      unsigned long attrs)
+{
+	unsigned long cl;
+	dma_addr_t addr = page_to_phys(page) + offset;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		or1k_sync_single_for_device(dev, addr, size, dir);
 
 	return addr;
 }
@@ -187,20 +197,6 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
-static void
-or1k_sync_single_for_device(struct device *dev,
-			    dma_addr_t dma_handle, size_t size,
-			    enum dma_data_direction dir)
-{
-	unsigned long cl;
-	dma_addr_t addr = dma_handle;
-	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
-	/* Flush the dcache for the requested range */
-	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
-		mtspr(SPR_DCBFR, cl);
-}
-
 const struct dma_map_ops or1k_dma_map_ops = {
 	.alloc = or1k_dma_alloc,
 	.free = or1k_dma_free,
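For context, this is the driver-side buffer-reuse pattern that the fix makes safe: a minimal hypothetical sketch against the generic DMA API. The function example_tx, the triple loop, and the transfer details are illustrative and not part of the commit; only the dma_* calls are the real API.

/*
 * Illustrative sketch only -- not part of the commit.  A hypothetical
 * driver maps a streaming buffer once and reuses it for several
 * device-bound transfers.  The second and later transfers rely on
 * dma_sync_single_for_device() performing the same dcache maintenance
 * as the original dma_map_page(), which is what this commit fixes on
 * openrisc.
 */
#include <linux/dma-mapping.h>

static int example_tx(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;
	int i;

	/* First use: mapping performs the initial cache maintenance. */
	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	for (i = 0; i < 3; i++) {
		/* Give the buffer back to the CPU before refilling it. */
		dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
		/* ... CPU writes new data into the page here ... */

		/*
		 * Hand the buffer back to the device.  On openrisc this
		 * must flush the dcache exactly as map_page would.
		 */
		dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
		/* ... start the device transfer and wait for it ... */
	}

	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

Before this change, only the initial dma_map_page() picked the cache operation based on the transfer direction; a reused buffer synced with dma_sync_single_for_device() was unconditionally flushed. After the change both paths go through or1k_sync_single_for_device(), so the two are equivalent as the DMA API requires.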