Commit 10eacf17 authored by Russell King

[ARM] Clean up ARM cache handling interfaces (part 1)

This starts to move the ARM cache handling interface towards a
purpose-defined rather than functionality-defined interface.
This is necessary so we are able to support a wide range of ARM
CPUs.
parent d7181b4b
@@ -161,11 +161,11 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
 	/*
 	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region.
+	 * kernel direct-mapped region for device DMA.
 	 */
 	{
 		unsigned long kaddr = (unsigned long)page_address(page);
-		invalidate_dcache_range(kaddr, kaddr + size);
+		dmac_inv_range(kaddr, kaddr + size);
 	}

 	/*
@@ -330,7 +330,7 @@ static int __init consistent_init(void)
 core_initcall(consistent_init);

 /*
- * make an area consistent.
+ * make an area consistent for devices.
  */
 void consistent_sync(void *vaddr, size_t size, int direction)
 {
@@ -339,13 +339,13 @@ void consistent_sync(void *vaddr, size_t size, int direction)
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		invalidate_dcache_range(start, end);
+		dmac_inv_range(start, end);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		clean_dcache_range(start, end);
+		dmac_clean_range(start, end);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		flush_dcache_range(start, end);
+		dmac_flush_range(start, end);
 		break;
 	default:
 		BUG();
 	}
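The switch above is the heart of the change for streaming DMA: the direction the driver states picks the cache operation. A minimal caller sketch, assuming the consistent_sync() shown above; the helper and buffer names below are hypothetical:

/*
 * Illustration only (not part of this commit): hypothetical driver
 * helpers showing how the direction argument selects the cache
 * operation inside consistent_sync().
 */
static void example_prepare_rx(void *buf, size_t len)
{
	/* device writes the buffer: discard stale CPU cache lines */
	consistent_sync(buf, len, DMA_FROM_DEVICE);	/* -> dmac_inv_range() */
}

static void example_prepare_tx(void *buf, size_t len)
{
	/* CPU wrote the buffer: write dirty lines back for the device */
	consistent_sync(buf, len, DMA_TO_DEVICE);	/* -> dmac_clean_range() */
}

DMA_BIDIRECTIONAL combines both cases via dmac_flush_range(). The header hunks below define these new purpose-named entry points.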
@@ -29,43 +29,54 @@
  */

 /*
- * Generic I + D cache
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
  */
-#define flush_cache_all()					\
-	do {							\
-		cpu_cache_clean_invalidate_all();		\
-	} while (0)
+#define dmac_inv_range		cpu_dcache_invalidate_range
+#define dmac_clean_range	cpu_dcache_clean_range
+#define dmac_flush_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)

-/* This is always called for current->mm */
-#define flush_cache_mm(_mm)					\
-	do {							\
-		if ((_mm) == current->active_mm)		\
-			cpu_cache_clean_invalidate_all();	\
-	} while (0)
+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_cache_all()	cpu_cache_clean_invalidate_all()

-#define flush_cache_range(_vma,_start,_end)			\
-	do {							\
-		if ((_vma)->vm_mm == current->active_mm)	\
-			cpu_cache_clean_invalidate_range((_start), (_end), 1); \
-	} while (0)
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+	if (current->active_mm == mm)
+		cpu_cache_clean_invalidate_all();
+}

-#define flush_cache_page(_vma,_vmaddr)				\
-	do {							\
-		if ((_vma)->vm_mm == current->active_mm) {	\
-			cpu_cache_clean_invalidate_range((_vmaddr),	\
-				(_vmaddr) + PAGE_SIZE,		\
-				((_vma)->vm_flags & VM_EXEC));	\
-		}						\
-	} while (0)
+static inline void
+flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	if (current->active_mm == vma->vm_mm)
+		cpu_cache_clean_invalidate_range(start & PAGE_MASK,
+						 PAGE_ALIGN(end), vma->vm_flags);
+}
+
+static inline void
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
+{
+	if (current->active_mm == vma->vm_mm) {
+		unsigned long addr = user_addr & PAGE_MASK;
+		cpu_cache_clean_invalidate_range(addr, addr + PAGE_SIZE,
+						 vma->vm_flags & VM_EXEC);
+	}
+}

 /*
- * D cache only
+ * Perform necessary cache operations to ensure that data previously
+ * stored within this range of addresses can be executed by the CPU.
  */
+#define flush_icache_range(s,e)	cpu_icache_invalidate_range(s,e)

-#define invalidate_dcache_range(_s,_e)	cpu_dcache_invalidate_range((_s),(_e))
-#define clean_dcache_range(_s,_e)	cpu_dcache_clean_range((_s),(_e))
-#define flush_dcache_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)
-
+/*
+ * Perform necessary cache operations to ensure that the TLB will
+ * see data written in the specified area.
+ */
 #define clean_dcache_area(start,size) \
 	cpu_cache_clean_invalidate_range((unsigned long)start, \
 					 ((unsigned long)start) + size, 0);
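The new flush_icache_range() comment states the contract in terms of purpose rather than mechanism. A sketch of the canonical caller, assuming the interface above; install_code() and its arguments are hypothetical:

#include <linux/string.h>	/* memcpy */

/*
 * Hypothetical caller (not from this commit): code that stores
 * instructions through the data side, such as a module loader,
 * must make them visible to instruction fetch before jumping in.
 */
static void install_code(void *dst, const void *insns, size_t len)
{
	memcpy(dst, insns, len);
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);
}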
@@ -104,18 +115,3 @@ static inline void flush_dcache_page(struct page *page)
  * duplicate cache flushing elsewhere performed by flush_dcache_page().
  */
 #define flush_icache_page(vma,page)	do { } while (0)
-
-/*
- * I cache coherency stuff.
- *
- * This *is not* just icache.  It is to make data written to memory
- * consistent such that instructions fetched from the region are what
- * we expect.
- *
- * This generally means that we have to clean out the Dcache and write
- * buffers, and maybe flush the Icache in the specified range.
- */
-#define flush_icache_range(_s,_e)				\
-	do {							\
-		cpu_icache_invalidate_range((_s), (_e));	\
-	} while (0)
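The flush_icache_range() removed here is not lost: it reappears earlier in the header under its new purpose-stating comment. And since the dmac_* helpers are explicitly private, drivers are expected to stay on the generic DMA API, which on ARM funnels into consistent_sync(). A hedged sketch; the helper is hypothetical, and the assumption that dma_map_single() reaches consistent_sync() comes from the surrounding tree, not this diff:

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment: use the generic streaming DMA API,
 * never dmac_* directly.  On ARM this is expected to end up in
 * consistent_sync(buf, len, DMA_TO_DEVICE) shown earlier.
 */
static dma_addr_t example_map_tx(struct device *dev, void *buf, size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}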