Commit a4a4d11a authored by Christoph Hellwig

openrisc: use the generic in-place uncached DMA allocator

Switch openrisc to use the dma-direct allocator and just provide the
hooks for setting memory uncached or cached.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stafford Horne <shorne@gmail.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent 999a5d12
...@@ -7,6 +7,8 @@ ...@@ -7,6 +7,8 @@
config OPENRISC config OPENRISC
def_bool y def_bool y
select ARCH_32BIT_OFF_T select ARCH_32BIT_OFF_T
select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_HAS_DMA_CLEAR_UNCACHED
select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select OF select OF
select OF_EARLY_FLATTREE select OF_EARLY_FLATTREE
......
...@@ -11,8 +11,6 @@ ...@@ -11,8 +11,6 @@
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
* *
* DMA mapping callbacks... * DMA mapping callbacks...
* As alloc_coherent is the only DMA callback being used currently, that's
* the only thing implemented properly. The rest need looking into...
*/ */
#include <linux/dma-noncoherent.h> #include <linux/dma-noncoherent.h>
...@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = { ...@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = {
.pte_entry = page_clear_nocache, .pte_entry = page_clear_nocache,
}; };
/*
 * Mark an existing kernel mapping as uncached (cache-inhibited) so it can
 * be used for coherent DMA. On OpenRISC "coherent" memory simply means
 * uncached: we walk every page of the range, flush it from the dcache and
 * set the cache-inhibit bit in its PTE.
 *
 * @cpu_addr: kernel virtual address of the region (page-aligned)
 * @size:     length of the region in bytes
 *
 * Returns @cpu_addr on success, or an ERR_PTR() encoding the error from
 * walk_page_range() on failure. Called by the generic dma-direct code via
 * ARCH_HAS_DMA_SET_UNCACHED.
 */
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
			NULL);
	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}
/*
 * Undo arch_dma_set_uncached(): clear the cache-inhibit bit on every page
 * of the range, returning the mapping to normal cached operation.
 *
 * @cpu_addr: kernel virtual address of the region (page-aligned)
 * @size:     length of the region in bytes
 *
 * walk_page_range() cannot fail here because the same range was walked
 * successfully when the region was set uncached, so a failure is only
 * reported via WARN_ON().
 */
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));
}
void arch_sync_dma_for_device(phys_addr_t addr, size_t size, void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment