Commit d3b9f659 authored by Christoph Hellwig, committed by Michal Simek

microblaze/nommu: use the generic uncached segment support

Stop providing our own arch alloc/free hooks for nommu platforms and
just expose the segment offset and use the generic dma-direct
allocator.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
parent a55aa89a
...@@ -5,9 +5,11 @@ config MICROBLAZE ...@@ -5,9 +5,11 @@ config MICROBLAZE
select ARCH_NO_SWAP select ARCH_NO_SWAP
select ARCH_HAS_BINFMT_FLAT if !MMU select ARCH_HAS_BINFMT_FLAT if !MMU
select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_UNCACHED_SEGMENT if !MMU
select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_COHERENT_DMA_MMAP if !MMU select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_IPC_PARSE_VERSION
......
...@@ -42,21 +42,48 @@ ...@@ -42,21 +42,48 @@
#include <asm/cpuinfo.h> #include <asm/cpuinfo.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#ifndef CONFIG_MMU void arch_dma_prep_coherent(struct page *page, size_t size)
/* I have to use dcache values because I can't relate on ram size */ {
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) phys_addr_t paddr = page_to_phys(page);
#endif
flush_dcache_range(paddr, paddr + size);
}
#ifndef CONFIG_MMU
/* /*
* Consistent memory allocators. Used for DMA devices that want to * Consistent memory allocators. Used for DMA devices that want to share
* share uncached memory with the processor core. * uncached memory with the processor core. My crufty no-MMU approach is
* My crufty no-MMU approach is simple. In the HW platform we can optionally * simple. In the HW platform we can optionally mirror the DDR up above the
* mirror the DDR up above the processor cacheable region. So, memory accessed * processor cacheable region. So, memory accessed in this mirror region will
* in this mirror region will not be cached. It's alloced from the same * not be cached. It's alloced from the same pool as normal memory, but the
* pool as normal memory, but the handle we return is shifted up into the * handle we return is shifted up into the uncached region. This will no doubt
* uncached region. This will no doubt cause big problems if memory allocated * cause big problems if memory allocated here is not also freed properly. -- JW
* here is not also freed properly. -- JW *
* I have to use dcache values because I can't relate on ram size:
*/ */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#else
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
/*
 * Translate a cached kernel virtual address into its uncached shadow
 * alias by setting the shadow-mask bits.  The returned handle is what
 * DMA consumers use to access the memory without going through the
 * data cache.  If the aliased address still lands inside the dcache
 * window, the uncached shadow is not actually effective on this
 * platform, so warn loudly.
 */
void *uncached_kernel_address(void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr | UNCACHED_SHADOW_MASK;

	if (vaddr > cpuinfo.dcache_base && vaddr < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	return (void *)vaddr;
}
/*
 * Map an uncached shadow address back to the cached kernel virtual
 * address it aliases, by clearing the shadow-mask bits again.
 */
void *cached_kernel_address(void *ptr)
{
	return (void *)((unsigned long)ptr & ~UNCACHED_SHADOW_MASK);
}
#else /* CONFIG_MMU */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs) gfp_t gfp, unsigned long attrs)
{ {
...@@ -64,12 +91,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -64,12 +91,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
void *ret; void *ret;
unsigned int i, err = 0; unsigned int i, err = 0;
struct page *page, *end; struct page *page, *end;
#ifdef CONFIG_MMU
phys_addr_t pa; phys_addr_t pa;
struct vm_struct *area; struct vm_struct *area;
unsigned long va; unsigned long va;
#endif
if (in_interrupt()) if (in_interrupt())
BUG(); BUG();
...@@ -86,26 +110,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -86,26 +110,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
* we need to ensure that there are no cachelines in use, * we need to ensure that there are no cachelines in use,
* or worse dirty in this area. * or worse dirty in this area.
*/ */
flush_dcache_range(virt_to_phys((void *)vaddr), arch_dma_prep_coherent(virt_to_page((unsigned long)vaddr), size);
virt_to_phys((void *)vaddr) + size);
#ifndef CONFIG_MMU
ret = (void *)vaddr;
/*
* Here's the magic! Note if the uncached shadow is not implemented,
* it's up to the calling code to also test that condition and make
* other arranegments, such as manually flushing the cache and so on.
*/
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
if ((unsigned int)ret > cpuinfo.dcache_base &&
(unsigned int)ret < cpuinfo.dcache_high)
pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
/* dma_handle is same as physical (shadowed) address */
*dma_handle = (dma_addr_t)ret;
#else
/* Allocate some common virtual space to map the new pages. */ /* Allocate some common virtual space to map the new pages. */
area = get_vm_area(size, VM_ALLOC); area = get_vm_area(size, VM_ALLOC);
if (!area) { if (!area) {
...@@ -117,7 +123,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -117,7 +123,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* This gives us the real physical address of the first page. */ /* This gives us the real physical address of the first page. */
*dma_handle = pa = __virt_to_phys(vaddr); *dma_handle = pa = __virt_to_phys(vaddr);
#endif
/* /*
* free wasted pages. We skip the first page since we know * free wasted pages. We skip the first page since we know
...@@ -131,10 +136,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -131,10 +136,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
split_page(page, order); split_page(page, order);
for (i = 0; i < size && err == 0; i += PAGE_SIZE) { for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
/* MS: This is the whole magic - use cache inhibit pages */ /* MS: This is the whole magic - use cache inhibit pages */
err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE); err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif
SetPageReserved(page); SetPageReserved(page);
page++; page++;
...@@ -154,7 +157,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -154,7 +157,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
return ret; return ret;
} }
#ifdef CONFIG_MMU
static pte_t *consistent_virt_to_pte(void *vaddr) static pte_t *consistent_virt_to_pte(void *vaddr)
{ {
unsigned long addr = (unsigned long)vaddr; unsigned long addr = (unsigned long)vaddr;
...@@ -172,7 +174,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr, ...@@ -172,7 +174,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
return pte_pfn(*ptep); return pte_pfn(*ptep);
} }
#endif
/* /*
* free page(s) as defined by the above mapping. * free page(s) as defined by the above mapping.
...@@ -187,18 +188,6 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, ...@@ -187,18 +188,6 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
#ifndef CONFIG_MMU
/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
page = virt_to_page(vaddr);
do {
__free_reserved_page(page);
page++;
} while (size -= PAGE_SIZE);
#else
do { do {
pte_t *ptep = consistent_virt_to_pte(vaddr); pte_t *ptep = consistent_virt_to_pte(vaddr);
unsigned long pfn; unsigned long pfn;
...@@ -216,5 +205,5 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, ...@@ -216,5 +205,5 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
/* flush tlb */ /* flush tlb */
flush_tlb_all(); flush_tlb_all();
#endif
} }
#endif /* CONFIG_MMU */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment