Commit d37d9068 authored by Russell King

[ARM] Part 2 in the cache API changes.

This is the new API; we now have methods for handling DMA which are
separate from those handling the TLB consistency issues, which are
in turn separate from the methods handling the cache coherency
issues.

Implementations are, however, free to alias these methods internally.
parent 10eacf17
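For orientation: every per-CPU vector added by this patch (v3_cache_fns, v4_cache_fns, v4wb_cache_fns, v4wt_cache_fns, arm1020_cache_fns, arm920_cache_fns, ...) lays out the same eight entry points in the same order, so the C side can treat them as one function-pointer structure. The sketch below is only a reading aid: the slot order is taken from the .long tables in this diff, while the member names and argument types are illustrative, not quoted from the actual header.

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int vm_flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_page)(void *page);
	void (*dma_inv_range)(unsigned long start, unsigned long end);
	void (*dma_clean_range)(unsigned long start, unsigned long end);
	void (*dma_flush_range)(unsigned long start, unsigned long end);
};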
...@@ -74,6 +74,9 @@ struct cpu_tlb_fns cpu_tlb;
 #ifdef MULTI_USER
 struct cpu_user_fns cpu_user;
 #endif
+#ifdef MULTI_CACHE
+struct cpu_cache_fns cpu_cache;
+#endif
 unsigned char aux_device_present;
 char elf_platform[ELF_PLATFORM_SIZE];
...@@ -282,6 +285,9 @@ static void __init setup_processor(void)
 #ifdef MULTI_USER
 	cpu_user = *list->user;
 #endif
+#ifdef MULTI_CACHE
+	cpu_cache = *list->cache;
+#endif
 	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
 	       cpu_name, processor_id, (int)processor_id & 15,
......
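The MULTI_CACHE copy above (cpu_cache = *list->cache) follows the pattern already used for MULTI_USER: when several cache types are compiled in, callers go indirectly through the cpu_cache structure filled in at boot, and with a single cache type the calls resolve directly to that one implementation. A sketch of what the header glue might look like, assuming the structure sketched earlier; only __cpuc_flush_dcache_page is actually visible in this diff (in the fault-armv.c hunk further down), the remaining names are illustrative:

#ifdef MULTI_CACHE
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
#define __cpuc_dma_inv_range		cpu_cache.dma_inv_range
#else
/* single-cache build, e.g. a v4wb-only kernel: go straight to the asm entries */
#define __cpuc_flush_kern_all		v4wb_flush_kern_cache_all
#define __cpuc_flush_user_range		v4wb_flush_user_cache_range
#define __cpuc_flush_dcache_page	v4wb_flush_kern_dcache_page
#define __cpuc_dma_inv_range		v4wb_dma_inv_range
#endif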
...@@ -20,16 +20,16 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 p-$(CONFIG_CPU_26) += proc-arm2_3.o
 # ARMv3
-p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
-p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o copypage-v3.o
+p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
 # ARMv4
-p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4wt.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o cache-v4.o copypage-v4wt.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4wb.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o cache-v4wb.o copypage-v4wb.o abort-ev4.o
-p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_SA1100) += proc-sa1100.o tlb-v4wb.o cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
 # ARMv5
 p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o
......
/*
* linux/arch/arm/mm/cache-v3.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v3_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v3_flush_kern_cache_all)
/* FALLTHROUGH */
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - flags - vm_flags describing the address space
*/
ENTRY(v3_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v3_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_clean_range)
mov pc, lr
ENTRY(v3_cache_fns)
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_flush_kern_dcache_page
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v4_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4_flush_kern_cache_all)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - flags - vm_flags describing the address space
*/
ENTRY(v4_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_clean_range)
mov pc, lr
ENTRY(v4_cache_fns)
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_flush_kern_dcache_page
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wb.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The total size of the data cache.
*/
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE 16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE 8192
#else
# error Unknown cache size
#endif
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*
* Size Clean (ticks) Dirty (ticks)
* 4096 21 20 21 53 55 54
* 8192 40 41 40 106 100 102
* 16384 77 77 76 140 140 138
* 32768 150 149 150 214 216 212 <---
* 65536 296 297 296 351 358 361
* 131072 591 591 591 656 657 651
* Whole 132 136 132 221 217 207 <---
*/
#define CACHE_DLIMIT (CACHE_DSIZE * 4)
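/*
 * A worked example of the limit above, assuming the SA1100 figures:
 * CACHE_DSIZE = 8192, so CACHE_DLIMIT = 4 * 8192 = 32768 bytes, which is
 * roughly the crossover point in the timing table where flushing the
 * whole cache becomes cheaper than the line-by-line loop.
 */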
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wb_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
mov r0, #FLUSH_BASE
add r1, r0, #CACHE_DSIZE
1: ldr r2, [r0], #32
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - flags - vm_flags describing the address space
*/
ENTRY(v4wb_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
cmp r3, #CACHE_DLIMIT @ total size >= limit?
bhs __flush_whole_cache @ flush whole D cache
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wb_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
/* fall through */
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* This is actually the same as v4wb_coherent_kern_range()
*/
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_flush_kern_dcache_page
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wt.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARMv4 write through cache operations support.
*
* We assume that the write buffer is not enabled.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 8
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*
* *** This needs benchmarking
*/
#define CACHE_DLIMIT 16384
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wt_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Clean and invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - flags - vm_flags describing the address space
*/
ENTRY(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wt_flush_kern_dcache_page)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, #PAGE_SZ
/* fallthrough */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_flush_kern_dcache_page
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
...@@ -184,9 +184,8 @@ void __flush_dcache_page(struct page *page)
 {
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
-	unsigned long kaddr = (unsigned long)page_address(page);
-	cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+	__cpuc_flush_dcache_page(page_address(page));
 	if (!page->mapping)
 		return;
...@@ -291,10 +290,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 	page = pfn_to_page(pfn);
 	if (page->mapping) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		unsigned long kaddr = (unsigned long)page_address(page);
 		if (dirty)
-			cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+			__cpuc_flush_dcache_page(page_address(page));
 		make_coherent(vma, addr, page, dirty);
 	}
......
...@@ -43,15 +43,26 @@
#define MAX_AREA_SIZE 32768 #define MAX_AREA_SIZE 32768
/* /*
* the cache line size of the I and D cache * The size of one data cache line.
*/ */
#define DCACHELINESIZE 32 #define CACHE_DLINESIZE 32
#define ICACHELINESIZE 32
/* /*
* and the page size * The number of data cache segments.
*/ */
#define PAGESIZE 4096 #define CACHE_DSEGMENTS 16
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 32768
.text .text
/* /*
...@@ -114,217 +125,220 @@ ENTRY(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
.align 5
/* /*
* cpu_arm1020_cache_clean_invalidate_all() * flush_user_cache_all()
* *
* clean and invalidate all cache lines * Invalidate all cache entries in a particular address
* space.
*/
ENTRY(arm1020_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
* *
* Note: * Clean and invalidate the entire cache.
* 1. we should preserve r0 at all times
*/ */
.align 5 ENTRY(arm1020_flush_kern_cache_all)
ENTRY(cpu_arm1020_cache_clean_invalidate_all) mov r2, #VM_EXEC
mov r2, #1 mov ip, #0
cpu_arm1020_cache_clean_invalidate_all_r2: __flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, ip, c7, c10, 4 mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
mov r1, #0xf @ 16 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1: mov r3, #0x3F @ 64 entries 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
2: mov ip, r3, LSL #26 @ shift up entry mcr p15, 0, ip, c7, c10, 4 @ drain WB
orr ip, ip, r1, LSL #5 @ shift in/up index subs r3, r3, #1 << 26
mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry bcs 2b @ entries 63 to 0
mcr p15, 0, ip, c7, c10, 4 @ drain WB subs r1, r1, #1 << 5
subs r3, r3, #1 bcs 1b @ segments 15 to 0
cmp r3, #0
bge 2b @ entries 3F to 0
subs r1, r1, #1
cmp r1, #0
bge 1b @ segments 7 to 0
#endif #endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm1020_cache_clean_invalidate_range(start, end, flags) * flush_user_cache_range(start, end, flags)
* *
* clean and invalidate all cache lines associated with this area of memory * Invalidate a range of cache entries in the specified
* address space.
* *
* start: Area start address * - start - start address (inclusive)
* end: Area end address * - end - end address (exclusive)
* flags: nonzero for I cache as well * - flags - vm_flags for this space
*/ */
.align 5 ENTRY(arm1020_flush_user_cache_range)
ENTRY(cpu_arm1020_cache_clean_invalidate_range) mov ip, #0
bic r0, r0, #DCACHELINESIZE - 1 sub r3, r1, r0 @ calculate total size
sub r3, r1, r0 cmp r3, #CACHE_DLIMIT
cmp r3, #MAX_AREA_SIZE bhs __flush_whole_cache
bgt cpu_arm1020_cache_clean_invalidate_all_r2
mcr p15, 0, r3, c7, c10, 4
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcr p15, 0, ip, c7, c10, 4
mcr p15, 0, r3, c7, c10, 4 @ drain WB 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #DCACHELINESIZE mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r3, c7, c10, 4 @ drain WB
add r0, r0, #DCACHELINESIZE
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
#endif #endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
teq r2, #0 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
movne r0, #0
mcrne p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif #endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ D-CACHE =============================== */
/* /*
* cpu_arm1020_dcache_invalidate_range(start, end) * coherent_kern_range(start, end)
* *
* throw away all D-cached data in specified region without an obligation * Ensure coherency between the Icache and the Dcache in the
* to write them back. Note however that we must clean the D-cached entries * region described by start. If you have non-snooping
* around the boundaries if the start and/or end address are not cache * Harvard caches, you need to implement this function.
* aligned.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(arm1020_coherent_kern_range)
ENTRY(cpu_arm1020_dcache_invalidate_range) mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
mcr p15, 0, ip, c7, c10, 4
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
/* D cache are on */ mcr p15, 0, r0, c7, c10, 1 @ clean D entry
tst r0, #DCACHELINESIZE - 1 mcr p15, 0, ip, c7, c10, 4 @ drain WB
bic r0, r0, #DCACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 4
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry at start
mcrne p15, 0, r0, c7, c10, 4 @ drain WB
tst r1, #DCACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 4
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry at end
mcrne p15, 0, r1, c7, c10, 4 @ drain WB
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1
blt 1b
#else
/* D cache off, but still drain the write buffer */
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
#endif #endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm1020_dcache_clean_range(start, end) * flush_kern_dcache_page(void *page)
* *
* For the specified virtual address range, ensure that all caches contain * Ensure no D cache aliasing occurs, either with itself or
* clean data, such that peripheral accesses to the physical RAM fetch * the I cache
* correct data.
* *
* start: virtual start address * - page - page aligned address
* end: virtual end address
*/ */
.align 5 ENTRY(arm1020_flush_kern_dcache_page)
ENTRY(cpu_arm1020_dcache_clean_range) mov ip, #0
bic r0, r0, #DCACHELINESIZE - 1
sub r3, r1, r0
cmp r3, #MAX_AREA_SIZE
bgt cpu_arm1020_cache_clean_invalidate_all_r2
mcr p15, 0, r3, c7, c10, 4
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry add r1, r0, #PAGE_SZ
mcr p15, 0, r3, c7, c10, 4 @ drain WB 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #DCACHELINESIZE mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r3, c7, c10, 4 @ drain WB
add r0, r0, #DCACHELINESIZE
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm1020_dcache_clean_page(page) * dma_inv_range(start, end)
* *
* Cleans a single page of dcache so that if we have any future aliased * Invalidate (discard) the specified virtual address range.
* mappings, they will be consistent at the time that they are created. * May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* *
* page: virtual address of page to clean from dcache * - start - virtual start address
* - end - virtual end address
* *
* Note: * (same as v4wb)
* 1. we don't need to flush the write buffer in this case.
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/ */
.align 5 ENTRY(arm1020_dma_inv_range)
ENTRY(cpu_arm1020_dcache_clean_page) mov ip, #0
mov r1, #PAGESIZE
mcr p15, 0, r0, c7, c10, 4
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns) tst r0, #CACHE_DLINESIZE - 1
mcr p15, 0, r0, c7, c10, 4 @ drain WB bic r0, r0, #CACHE_DLINESIZE - 1
add r0, r0, #DCACHELINESIZE mcrne p15, 0, ip, c7, c10, 4
mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
add r0, r0, #DCACHELINESIZE tst r1, #CACHE_DLINESIZE - 1
subs r1, r1, #2 * DCACHELINESIZE mcrne p15, 0, ip, c7, c10, 4
bhi 1b mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm1020_dcache_clean_entry(addr) * dma_clean_range(start, end)
* *
* Clean the specified entry of any caches such that the MMU * Clean the specified virtual address range.
* translation fetches will obtain correct data.
* *
* addr: cache-unaligned virtual address * - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/ */
.align 5 ENTRY(arm1020_dma_clean_range)
ENTRY(cpu_arm1020_dcache_clean_entry) mov ip, #0
mov r1, #0
mcr p15, 0, r1, c7, c10, 4
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean single D entry bic r0, r0, #CACHE_DLINESIZE - 1
mcr p15, 0, r1, c7, c10, 4 @ drain WB 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifndef CONFIG_CPU_ICACHE_DISABLE add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r1, c7, c5, 1 @ invalidate I entry cmp r0, r1
blo 1b
#endif #endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ I-CACHE =============================== */
/* /*
* cpu_arm1020_icache_invalidate_range(start, end) * dma_flush_range(start, end)
* *
* invalidate a range of virtual addresses from the Icache * Clean and invalidate the specified virtual address range.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(arm1020_dma_flush_range)
ENTRY(cpu_arm1020_icache_invalidate_range) mov ip, #0
1: mcr p15, 0, r0, c7, c10, 4
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ Clean D entry bic r0, r0, #CACHE_DLINESIZE - 1
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4
add r0, r0, #DCACHELINESIZE 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
mcr p15, 0, r0, c7, c10, 1 @ Clean D entry mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c7, c10, 4 @ drain WB add r0, r0, #CACHE_DLINESIZE
#endif
add r0, r0, #DCACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
ENTRY(cpu_arm1020_icache_invalidate_page) #endif
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(arm1020_cache_fns)
.long arm1020_flush_kern_cache_all
.long arm1020_flush_user_cache_all
.long arm1020_flush_user_cache_range
.long arm1020_coherent_kern_range
.long arm1020_flush_kern_dcache_page
.long arm1020_dma_inv_range
.long arm1020_dma_clean_range
.long arm1020_dma_flush_range
.align 5
ENTRY(cpu_arm1020_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, ip, c7, c10, 4 @ drain WB
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mov pc, lr mov pc, lr
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -379,7 +393,7 @@ ENTRY(cpu_arm1020_set_pte)
bic r2, r2, #3 bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL orr r2, r2, #HPTE_TYPE_SMALL
tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec? tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
...@@ -477,22 +491,7 @@ arm1020_processor_functions:
.word cpu_arm1020_proc_fin .word cpu_arm1020_proc_fin
.word cpu_arm1020_reset .word cpu_arm1020_reset
.word cpu_arm1020_do_idle .word cpu_arm1020_do_idle
.word cpu_arm1020_dcache_clean_area
/* cache */
.word cpu_arm1020_cache_clean_invalidate_all
.word cpu_arm1020_cache_clean_invalidate_range
/* dcache */
.word cpu_arm1020_dcache_invalidate_range
.word cpu_arm1020_dcache_clean_range
.word cpu_arm1020_dcache_clean_page
.word cpu_arm1020_dcache_clean_entry
/* icache */
.word cpu_arm1020_icache_invalidate_range
.word cpu_arm1020_icache_invalidate_page
/* pgtable */
.word cpu_arm1020_set_pgd .word cpu_arm1020_set_pgd
.word cpu_arm1020_set_pte .word cpu_arm1020_set_pte
...@@ -524,4 +523,5 @@ __arm1020_proc_info:
.long arm1020_processor_functions .long arm1020_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long v4wb_user_fns .long v4wb_user_fns
.long arm1020_cache_fns
.size __arm1020_proc_info, . - __arm1020_proc_info .size __arm1020_proc_info, . - __arm1020_proc_info
...@@ -37,35 +37,6 @@
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/hardware.h> #include <asm/hardware.h>
/*
* Function: arm720_cache_clean_invalidate_all (void)
* : arm720_cache_clean_invalidate_page (unsigned long address, int size,
* int flags)
*
* Params : address Area start address
* : size size of area
* : flags b0 = I cache as well
*
* Purpose : Flush all cache lines
*/
ENTRY(cpu_arm720_cache_clean_invalidate_all)
ENTRY(cpu_arm720_cache_clean_invalidate_range)
ENTRY(cpu_arm720_icache_invalidate_range)
ENTRY(cpu_arm720_icache_invalidate_page)
ENTRY(cpu_arm720_dcache_invalidate_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush cache
mov pc, lr
/*
* These just expect cache lines to be cleaned. Since we have a writethrough
* cache, we never have any dirty cachelines to worry about.
*/
ENTRY(cpu_arm720_dcache_clean_range)
ENTRY(cpu_arm720_dcache_clean_page)
ENTRY(cpu_arm720_dcache_clean_entry)
mov pc, lr
/* /*
* Function: arm720_check_bugs (void) * Function: arm720_check_bugs (void)
* : arm720_proc_init (void) * : arm720_proc_init (void)
...@@ -79,6 +50,7 @@ ENTRY(cpu_arm720_check_bugs)
msr cpsr, ip msr cpsr, ip
mov pc, lr mov pc, lr
ENTRY(cpu_arm720_dcache_clean_area)
ENTRY(cpu_arm720_proc_init) ENTRY(cpu_arm720_proc_init)
mov pc, lr mov pc, lr
...@@ -130,7 +102,7 @@ ENTRY(cpu_arm720_set_pte)
bic r2, r2, #3 bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL orr r2, r2, #HPTE_TYPE_SMALL
tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec? tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
...@@ -189,25 +161,9 @@ ENTRY(arm720_processor_functions)
.word cpu_arm720_proc_fin .word cpu_arm720_proc_fin
.word cpu_arm720_reset .word cpu_arm720_reset
.word cpu_arm720_do_idle .word cpu_arm720_do_idle
.word cpu_arm720_dcache_clean_area
/* cache */
.word cpu_arm720_cache_clean_invalidate_all
.word cpu_arm720_cache_clean_invalidate_range
/* dcache */
.word cpu_arm720_dcache_invalidate_range
.word cpu_arm720_dcache_clean_range
.word cpu_arm720_dcache_clean_page
.word cpu_arm720_dcache_clean_entry
/* icache */
.word cpu_arm720_icache_invalidate_range
.word cpu_arm720_icache_invalidate_page
/* pgtable */
.word cpu_arm720_set_pgd .word cpu_arm720_set_pgd
.word cpu_arm720_set_pte .word cpu_arm720_set_pte
.size arm720_processor_functions, . - arm720_processor_functions .size arm720_processor_functions, . - arm720_processor_functions
.type cpu_arch_name, #object .type cpu_arch_name, #object
...@@ -238,4 +194,5 @@ __arm720_proc_info:
.long arm720_processor_functions .long arm720_processor_functions
.long v4_tlb_fns .long v4_tlb_fns
.long v4wt_user_fns .long v4wt_user_fns
.long v4_cache_fns
.size __arm720_proc_info, . - __arm720_proc_info .size __arm720_proc_info, . - __arm720_proc_info
...@@ -28,30 +28,33 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/* /*
* This is the maximum size of an area which will be invalidated * The size of one data cache line.
* using the single invalidate entry instructions. Anything larger */
* than this, and we go for the whole cache. #define CACHE_DLINESIZE 32
*
* This value should be chosen such that we choose the cheapest /*
* alternative. * The number of data cache segments.
*/ */
#define MAX_AREA_SIZE 16384 #define CACHE_DSEGMENTS 8
/* /*
* the cache line size of the I and D cache * The number of lines in a cache segment.
*/ */
#define DCACHELINESIZE 32 #define CACHE_DENTRIES 64
#define ICACHELINESIZE 32
/* /*
* and the page size * This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/ */
#define PAGESIZE 4096 #define CACHE_DLIMIT 65536
.text .text
/* /*
...@@ -76,7 +79,11 @@ ENTRY(cpu_arm920_proc_fin)
stmfd sp!, {lr} stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip msr cpsr_c, ip
bl cpu_arm920_cache_clean_invalidate_all #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bl arm920_flush_kern_cache_all
#else
bl v4wt_flush_kern_cache_all
#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
...@@ -112,225 +119,184 @@ ENTRY(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr mov pc, lr
/* ================================= CACHE ================================ */
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/* /*
* cpu_arm920_cache_clean_invalidate_all() * flush_user_cache_all()
* *
* clean and invalidate all cache lines * Invalidate all cache entries in a particular address
* * space.
* Note:
* 1. we should preserve r0 at all times
*/ */
.align 5 ENTRY(arm920_flush_user_cache_all)
ENTRY(cpu_arm920_cache_clean_invalidate_all) /* FALLTHROUGH */
mov r2, #1
cpu_arm920_cache_clean_invalidate_all_r2:
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
/* /*
* 'Clean & Invalidate whole DCache' * flush_kern_cache_all()
* Re-written to use Index Ops. *
* Uses registers r1, r3 and ip * Clean and invalidate the entire cache.
*/ */
mov r1, #7 << 5 @ 8 segments ENTRY(arm920_flush_kern_cache_all)
1: orr r3, r1, #63 << 26 @ 64 entries mov r2, #VM_EXEC
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index mov ip, #0
__flush_whole_cache:
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26 subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0 bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5 subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0 bcs 1b @ segments 7 to 0
#endif tst r2, #VM_EXEC
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_cache_clean_invalidate_range(start, end, flags) * flush_user_cache_range(start, end, flags)
* *
* clean and invalidate all cache lines associated with this area of memory * Invalidate a range of cache entries in the specified
* address space.
* *
* start: Area start address * - start - start address (inclusive)
* end: Area end address * - end - end address (exclusive)
* flags: nonzero for I cache as well * - flags - vm_flags for address space
*/ */
.align 5 ENTRY(arm920_flush_user_cache_range)
ENTRY(cpu_arm920_cache_clean_invalidate_range) mov ip, #0
bic r0, r0, #DCACHELINESIZE - 1 @ && added by PGM sub r3, r1, r0 @ calculate total size
bic r1, r1, #DCACHELINESIZE - 1 @ && added by DHM cmp r3, #CACHE_DLIMIT
sub r3, r1, r0 bhs __flush_whole_cache
cmp r3, #MAX_AREA_SIZE
bgt cpu_arm920_cache_clean_invalidate_all_r2 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
1: teq r2, #0 tst r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
#else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
#endif
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
tst r2, #VM_EXEC
mcr p15, 0, r1, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ D-CACHE =============================== */
/* /*
* cpu_arm920_dcache_invalidate_range(start, end) * coherent_kern_range(start, end)
* *
* throw away all D-cached data in specified region without an obligation * Ensure coherency between the Icache and the Dcache in the
* to write them back. Note however that we must clean the D-cached entries * region described by start, end. If you have non-snooping
* around the boundaries if the start and/or end address are not cache * Harvard caches, you need to implement this function.
* aligned.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(arm920_coherent_kern_range)
ENTRY(cpu_arm920_dcache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
tst r0, #DCACHELINESIZE - 1 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE
tst r1, #DCACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
#endif @ clean D entry
bic r0, r0, #DCACHELINESIZE - 1
bic r1, r1, #DCACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_dcache_clean_range(start, end) * flush_kern_dcache_page(void *page)
* *
* For the specified virtual address range, ensure that all caches contain * Ensure no D cache aliasing occurs, either with itself or
* clean data, such that peripheral accesses to the physical RAM fetch * the I cache
* correct data.
* *
* start: virtual start address * - addr - page aligned address
* end: virtual end address
*/ */
.align 5 ENTRY(arm920_flush_kern_dcache_page)
ENTRY(cpu_arm920_dcache_clean_range) add r1, r0, #PAGE_SZ
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
bic r0, r0, #DCACHELINESIZE - 1 add r0, r0, #CACHE_DLINESIZE
sub r1, r1, r0 cmp r0, r1
cmp r1, #MAX_AREA_SIZE blo 1b
mov r2, #0 mov r0, #0
bgt cpu_arm920_cache_clean_invalidate_all_r2 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
bic r1, r1, #DCACHELINESIZE -1
add r1, r1, #DCACHELINESIZE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bpl 1b
#endif
mcr p15, 0, r2, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_dcache_clean_page(page) * dma_inv_range(start, end)
* *
* Cleans a single page of dcache so that if we have any future aliased * Invalidate (discard) the specified virtual address range.
* mappings, they will be consistent at the time that they are created. * May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* *
* page: virtual address of page to clean from dcache * - start - virtual start address
* - end - virtual end address
* *
* Note: * (same as v4wb)
* 1. we don't need to flush the write buffer in this case.
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/ */
.align 5 ENTRY(arm920_dma_inv_range)
ENTRY(cpu_arm920_dcache_clean_page) tst r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1
mov r1, #PAGESIZE mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1
add r0, r0, #DCACHELINESIZE mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #2 * DCACHELINESIZE cmp r0, r1
bne 1b blo 1b
#endif mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm920_dcache_clean_entry(addr) * dma_clean_range(start, end)
* *
* Clean the specified entry of any caches such that the MMU * Clean the specified virtual address range.
* translation fetches will obtain correct data.
* *
* addr: cache-unaligned virtual address * - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/ */
.align 5 ENTRY(arm920_dma_clean_range)
ENTRY(cpu_arm920_dcache_clean_entry) bic r0, r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE
#endif cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ I-CACHE =============================== */
/* /*
* cpu_arm920_icache_invalidate_range(start, end) * dma_flush_range(start, end)
* *
* invalidate a range of virtual addresses from the Icache * Clean and invalidate the specified virtual address range.
* *
* This is a little misleading, it is not intended to clean out * - start - virtual start address
* the i-cache but to make sure that any data written to the * - end - virtual end address
* range is made consistent. This means that when we execute code
* in that region, everything works as we expect.
*
* This generally means writing back data in the Dcache and
* write buffer and flushing the Icache over that region
*
* start: virtual start address
* end: virtual end address
*
* NOTE: ICACHELINESIZE == DCACHELINESIZE (so we don't need to
* loop twice, once for i-cache, once for d-cache)
*/ */
.align 5 ENTRY(arm920_dma_flush_range)
ENTRY(cpu_arm920_icache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
bic r0, r0, #ICACHELINESIZE - 1 @ Safety check 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
sub r1, r1, r0 add r0, r0, #CACHE_DLINESIZE
cmp r1, #MAX_AREA_SIZE cmp r0, r1
bgt cpu_arm920_cache_clean_invalidate_all_r2 blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
bic r1, r1, #ICACHELINESIZE - 1 ENTRY(arm920_cache_fns)
add r1, r1, #ICACHELINESIZE .long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
.long arm920_coherent_kern_range
.long arm920_flush_kern_dcache_page
.long arm920_dma_inv_range
.long arm920_dma_clean_range
.long arm920_dma_flush_range
1: mcr p15, 0, r0, c7, c5, 1 @ Clean I entry #endif
mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
add r0, r0, #ICACHELINESIZE
subs r1, r1, #ICACHELINESIZE
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_arm920_icache_invalidate_page) ENTRY(cpu_arm920_dcache_clean_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mov pc, lr mov pc, lr
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
...@@ -346,15 +312,14 @@ ENTRY(cpu_arm920_icache_invalidate_page)
ENTRY(cpu_arm920_set_pgd) ENTRY(cpu_arm920_set_pgd)
mov ip, #0 mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
/* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else #else
@ && 'Clean & Invalidate whole DCache' @ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops. @ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip @ && Uses registers r1, r3 and ip
mov r1, #7 << 5 @ 8 segments mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #63 << 26 @ 64 entries 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26 subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0 bcs 2b @ entries 63 to 0
...@@ -374,8 +339,6 @@ ENTRY(cpu_arm920_set_pgd)
*/ */
.align 5 .align 5
ENTRY(cpu_arm920_set_pte) ENTRY(cpu_arm920_set_pte)
tst r0, #2048
streq r0, [r0, -r0] @ BUG_ON
str r1, [r0], #-2048 @ linux version str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
...@@ -384,7 +347,7 @@ ENTRY(cpu_arm920_set_pte)
bic r2, r2, #3 bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL orr r2, r2, #HPTE_TYPE_SMALL
tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec? tst r1, #LPTE_USER @ User or Exec?
orrne r2, r2, #HPTE_AP_READ orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
...@@ -468,25 +431,9 @@ arm920_processor_functions:
.word cpu_arm920_proc_fin .word cpu_arm920_proc_fin
.word cpu_arm920_reset .word cpu_arm920_reset
.word cpu_arm920_do_idle .word cpu_arm920_do_idle
.word cpu_arm920_dcache_clean_area
/* cache */
.word cpu_arm920_cache_clean_invalidate_all
.word cpu_arm920_cache_clean_invalidate_range
/* dcache */
.word cpu_arm920_dcache_invalidate_range
.word cpu_arm920_dcache_clean_range
.word cpu_arm920_dcache_clean_page
.word cpu_arm920_dcache_clean_entry
/* icache */
.word cpu_arm920_icache_invalidate_range
.word cpu_arm920_icache_invalidate_page
/* pgtable */
.word cpu_arm920_set_pgd .word cpu_arm920_set_pgd
.word cpu_arm920_set_pte .word cpu_arm920_set_pte
.size arm920_processor_functions, . - arm920_processor_functions .size arm920_processor_functions, . - arm920_processor_functions
.type cpu_arch_name, #object .type cpu_arch_name, #object
...@@ -515,4 +462,9 @@ __arm920_proc_info:
.long arm920_processor_functions .long arm920_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long v4wb_user_fns .long v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
.long arm920_cache_fns
#else
.long v4wt_cache_fns
#endif
.size __arm920_proc_info, . - __arm920_proc_info .size __arm920_proc_info, . - __arm920_proc_info
...@@ -29,30 +29,34 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/* /*
* This is the maximum size of an area which will be invalidated * The size of one data cache line.
* using the single invalidate entry instructions. Anything larger */
* than this, and we go for the whole cache. #define CACHE_DLINESIZE 32
*
* This value should be chosen such that we choose the cheapest /*
* alternative. * The number of data cache segments.
*/ */
#define MAX_AREA_SIZE 8192 #define CACHE_DSEGMENTS 4
/* /*
* the cache line size of the I and D cache * The number of lines in a cache segment.
*/ */
#define DCACHELINESIZE 32 #define CACHE_DENTRIES 64
#define ICACHELINESIZE 32
/* /*
* and the page size * This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions. (I think this should
* be 32768).
*/ */
#define PAGESIZE 4096 #define CACHE_DLIMIT 8192
.text .text
/* /*
...@@ -77,7 +81,11 @@ ENTRY(cpu_arm922_proc_fin)
stmfd sp!, {lr} stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip msr cpsr_c, ip
bl cpu_arm922_cache_clean_invalidate_all #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bl arm922_flush_kern_cache_all
#else
bl v4wt_flush_kern_cache_all
#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
...@@ -113,225 +121,186 @@ ENTRY(cpu_arm922_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr mov pc, lr
/* ================================= CACHE ================================ */
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/* /*
* cpu_arm922_cache_clean_invalidate_all() * flush_user_cache_all()
* *
* clean and invalidate all cache lines * Clean and invalidate all cache entries in a particular
* * address space.
* Note:
* 1. we should preserve r0 at all times
*/ */
.align 5 ENTRY(arm922_flush_user_cache_all)
ENTRY(cpu_arm922_cache_clean_invalidate_all) /* FALLTHROUGH */
mov r2, #1
cpu_arm922_cache_clean_invalidate_all_r2:
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
/* /*
* 'Clean & Invalidate whole DCache' * flush_kern_cache_all()
* Re-written to use Index Ops. *
* Uses registers r1, r3 and ip * Clean and invalidate the entire cache.
*/ */
mov r1, #3 << 5 @ 4 segments ENTRY(arm922_flush_kern_cache_all)
1: orr r3, r1, #63 << 26 @ 64 entries mov r2, #VM_EXEC
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index mov ip, #0
__flush_whole_cache:
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26 subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0 bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5 subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0 bcs 1b @ segments 7 to 0
#endif tst r2, #VM_EXEC
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm922_cache_clean_invalidate_range(start, end, flags) * flush_user_cache_range(start, end, flags)
* *
* clean and invalidate all cache lines associated with this area of memory * Clean and invalidate a range of cache entries in the
* specified address range.
* *
* start: Area start address * - start - start address (inclusive)
* end: Area end address * - end - end address (exclusive)
* flags: nonzero for I cache as well * - flags - vm_flags describing address space
*/ */
.align 5 ENTRY(arm922_flush_user_cache_range)
ENTRY(cpu_arm922_cache_clean_invalidate_range) mov ip, #0
bic r0, r0, #DCACHELINESIZE - 1 @ && added by PGM sub r3, r1, r0 @ calculate total size
bic r1, r1, #DCACHELINESIZE - 1 @ && added by DHM cmp r3, #CACHE_DLIMIT
sub r3, r1, r0 bhs __flush_whole_cache
cmp r3, #MAX_AREA_SIZE
bgt cpu_arm922_cache_clean_invalidate_all_r2 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
1: teq r2, #0 tst r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
#else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
#endif
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
tst r2, #VM_EXEC
mcr p15, 0, r1, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ D-CACHE =============================== */
/* /*
* cpu_arm922_dcache_invalidate_range(start, end) * coherent_kern_range(start, end)
* *
* throw away all D-cached data in specified region without an obligation * Ensure coherency between the Icache and the Dcache in the
* to write them back. Note however that we must clean the D-cached entries * region described by start, end. If you have non-snooping
* around the boundaries if the start and/or end address are not cache * Harvard caches, you need to implement this function.
* aligned.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(arm922_coherent_kern_range)
ENTRY(cpu_arm922_dcache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
tst r0, #DCACHELINESIZE - 1 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE
tst r1, #DCACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
#endif @ clean D entry
bic r0, r0, #DCACHELINESIZE - 1
bic r1, r1, #DCACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm922_dcache_clean_range(start, end) * flush_kern_dcache_page(void *page)
* *
* For the specified virtual address range, ensure that all caches contain * Ensure no D cache aliasing occurs, either with itself or
* clean data, such that peripheral accesses to the physical RAM fetch * the I cache
* correct data.
* *
* start: virtual start address * - addr - page aligned address
* end: virtual end address
*/ */
.align 5 ENTRY(arm922_flush_kern_dcache_page)
ENTRY(cpu_arm922_dcache_clean_range) add r1, r0, #PAGE_SZ
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
bic r0, r0, #DCACHELINESIZE - 1 add r0, r0, #CACHE_DLINESIZE
sub r1, r1, r0 cmp r0, r1
cmp r1, #MAX_AREA_SIZE blo 1b
mov r2, #0 mov r0, #0
bgt cpu_arm922_cache_clean_invalidate_all_r2 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
bic r1, r1, #DCACHELINESIZE -1
add r1, r1, #DCACHELINESIZE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bpl 1b
#endif
mcr p15, 0, r2, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm922_dcache_clean_page(page) * dma_inv_range(start, end)
* *
* Cleans a single page of dcache so that if we have any future aliased * Invalidate (discard) the specified virtual address range.
* mappings, they will be consistent at the time that they are created. * May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* *
* page: virtual address of page to clean from dcache * - start - virtual start address
* - end - virtual end address
* *
* Note: * (same as v4wb)
* 1. we don't need to flush the write buffer in this case.
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/ */
.align 5 ENTRY(arm922_dma_inv_range)
ENTRY(cpu_arm922_dcache_clean_page) tst r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1
mov r1, #PAGESIZE mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1
add r0, r0, #DCACHELINESIZE mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 1 @ clean D entry 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #2 * DCACHELINESIZE cmp r0, r1
bne 1b blo 1b
#endif mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
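Because dma_inv_range may not write back whole entries, the routine above first cleans any line that the unaligned start or end of the buffer only partially covers, then invalidates full lines. A hedged C sketch of that boundary handling, assuming the 32-byte line size used above (the helpers stand in for single coprocessor operations):

/* Sketch of arm922_dma_inv_range's edge handling; helper names are invented. */
#define LINE 32

static void clean_dline(unsigned long a)      { (void)a; /* c7, c10, 1 */ }
static void invalidate_dline(unsigned long a) { (void)a; /* c7, c6, 1  */ }

static void dma_inv_range_sketch(unsigned long start, unsigned long end)
{
        unsigned long a = start & ~(unsigned long)(LINE - 1);

        if (start & (LINE - 1))         /* buffer starts mid-line: preserve the rest */
                clean_dline(a);
        if (end & (LINE - 1))           /* buffer ends mid-line: preserve the rest */
                clean_dline(end);
        for (; a < end; a += LINE)
                invalidate_dline(a);
        /* the assembly finishes with a write buffer drain */
}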
/* /*
* cpu_arm922_dcache_clean_entry(addr) * dma_clean_range(start, end)
*
* Clean the specified virtual address range.
* *
* Clean the specified entry of any caches such that the MMU * - start - virtual start address
* translation fetches will obtain correct data. * - end - virtual end address
* *
* addr: cache-unaligned virtual address * (same as v4wb)
*/ */
.align 5 ENTRY(arm922_dma_clean_range)
ENTRY(cpu_arm922_dcache_clean_entry) bic r0, r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE
#endif cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ I-CACHE =============================== */
/* /*
* cpu_arm922_icache_invalidate_range(start, end) * dma_flush_range(start, end)
*
* invalidate a range of virtual addresses from the Icache
* *
* This is a little misleading, it is not intended to clean out * Clean and invalidate the specified virtual address range.
* the i-cache but to make sure that any data written to the
* range is made consistent. This means that when we execute code
* in that region, everything works as we expect.
* *
* This generally means writing back data in the Dcache and * - start - virtual start address
* write buffer and flushing the Icache over that region * - end - virtual end address
*
* start: virtual start address
* end: virtual end address
*
* NOTE: ICACHELINESIZE == DCACHELINESIZE (so we don't need to
* loop twice, once for i-cache, once for d-cache)
*/ */
.align 5 ENTRY(arm922_dma_flush_range)
ENTRY(cpu_arm922_icache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
bic r0, r0, #ICACHELINESIZE - 1 @ Safety check 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
sub r1, r1, r0 add r0, r0, #CACHE_DLINESIZE
cmp r1, #MAX_AREA_SIZE cmp r0, r1
bgt cpu_arm922_cache_clean_invalidate_all_r2 blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
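The three dma_* methods above split DMA cache maintenance into clean (write back only), invalidate (discard only, with the boundary cleaning shown earlier) and flush (both). The dispatch below is only an assumption about how a caller would choose between them by transfer direction; it follows the usual streaming-DMA convention and is not spelled out in this patch, and the direction names and range helpers are local stand-ins:

enum dma_dir_sketch { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

static void dma_clean_range_sk(unsigned long s, unsigned long e) { (void)s; (void)e; }
static void dma_inv_range_sk(unsigned long s, unsigned long e)   { (void)s; (void)e; }
static void dma_flush_range_sk(unsigned long s, unsigned long e) { (void)s; (void)e; }

static void dma_cache_maint_sketch(unsigned long start, unsigned long end,
                                   enum dma_dir_sketch dir)
{
        switch (dir) {
        case TO_DEVICE:         /* CPU filled the buffer: push it to RAM */
                dma_clean_range_sk(start, end);
                break;
        case FROM_DEVICE:       /* device will write it: drop stale CPU lines */
                dma_inv_range_sk(start, end);
                break;
        case BIDIRECTIONAL:     /* both directions: clean and invalidate */
                dma_flush_range_sk(start, end);
                break;
        }
}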
bic r1, r1, #ICACHELINESIZE - 1 ENTRY(arm922_cache_fns)
add r1, r1, #ICACHELINESIZE .long arm922_flush_kern_cache_all
.long arm922_flush_user_cache_all
.long arm922_flush_user_cache_range
.long arm922_coherent_kern_range
.long arm922_flush_kern_dcache_page
.long arm922_dma_inv_range
.long arm922_dma_clean_range
.long arm922_dma_flush_range
1: mcr p15, 0, r0, c7, c5, 1 @ Clean I entry #endif
mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
add r0, r0, #ICACHELINESIZE
subs r1, r1, #ICACHELINESIZE
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
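arm922_cache_fns above is a table of eight code pointers in a fixed order, which the multi-cache build dereferences instead of calling the routines directly. The structure below is a guess at that layout for illustration only; the field names and prototypes are not copied from the kernel headers:

/* Assumed layout matching the .long table above (names illustrative). */
struct cache_methods_sketch {
        void (*flush_kern_all)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long start, unsigned long end,
                                 unsigned int vm_flags);
        void (*coherent_kern_range)(unsigned long start, unsigned long end);
        void (*flush_kern_dcache_page)(void *page);
        void (*dma_inv_range)(unsigned long start, unsigned long end);
        void (*dma_clean_range)(unsigned long start, unsigned long end);
        void (*dma_flush_range)(unsigned long start, unsigned long end);
};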
ENTRY(cpu_arm922_icache_invalidate_page) ENTRY(cpu_arm922_dcache_clean_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mov pc, lr mov pc, lr
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
@@ -347,15 +316,14 @@ ENTRY(cpu_arm922_icache_invalidate_page)

ENTRY(cpu_arm922_set_pgd) ENTRY(cpu_arm922_set_pgd)
mov ip, #0 mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
/* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else #else
@ && 'Clean & Invalidate whole DCache' @ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops. @ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip @ && Uses registers r1, r3 and ip
mov r1, #3 << 5 @ 4 segments mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 4 segments
1: orr r3, r1, #63 << 26 @ 64 entries 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26 subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0 bcs 2b @ entries 63 to 0
@@ -383,7 +351,7 @@ ENTRY(cpu_arm922_set_pte)
bic r2, r2, #3 bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL orr r2, r2, #HPTE_TYPE_SMALL
tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec? tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -467,25 +435,9 @@ arm922_processor_functions:
.word cpu_arm922_proc_fin .word cpu_arm922_proc_fin
.word cpu_arm922_reset .word cpu_arm922_reset
.word cpu_arm922_do_idle .word cpu_arm922_do_idle
.word cpu_arm922_dcache_clean_area
/* cache */
.word cpu_arm922_cache_clean_invalidate_all
.word cpu_arm922_cache_clean_invalidate_range
/* dcache */
.word cpu_arm922_dcache_invalidate_range
.word cpu_arm922_dcache_clean_range
.word cpu_arm922_dcache_clean_page
.word cpu_arm922_dcache_clean_entry
/* icache */
.word cpu_arm922_icache_invalidate_range
.word cpu_arm922_icache_invalidate_page
/* pgtable */
.word cpu_arm922_set_pgd .word cpu_arm922_set_pgd
.word cpu_arm922_set_pte .word cpu_arm922_set_pte
.size arm922_processor_functions, . - arm922_processor_functions .size arm922_processor_functions, . - arm922_processor_functions
.type cpu_arch_name, #object .type cpu_arch_name, #object
@@ -514,4 +466,9 @@ __arm922_proc_info:
.long arm922_processor_functions .long arm922_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long v4wb_user_fns .long v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
.long arm922_cache_fns
#else
.long v4wt_cache_fns
#endif
.size __arm922_proc_info, . - __arm922_proc_info .size __arm922_proc_info, . - __arm922_proc_info
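The proc_info record for the ARM922 now carries a fourth method-table pointer alongside the processor, TLB and user (page copy) tables, and the #ifndef above selects arm922_cache_fns for a write-back D-cache or the shared v4wt_cache_fns when CONFIG_CPU_DCACHE_WRITETHROUGH is set. A sketch of the assumed tail of that record; the shape and field names are illustrative and the real declaration lives elsewhere:

/* Shape assumed for illustration only. */
struct proc_info_tail_sketch {
        const void *proc;       /* *_processor_functions             */
        const void *tlb;        /* *_tlb_fns                         */
        const void *user;       /* *_user_fns (page copy/clear)      */
        const void *cache;      /* *_cache_fns - the slot added here */
};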
@@ -28,9 +28,10 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/* /*
* This is the maximum size of an area which will be invalidated * This is the maximum size of an area which will be invalidated
@@ -40,21 +41,14 @@
* This value should be chosen such that we choose the cheapest * This value should be chosen such that we choose the cheapest
* alternative. * alternative.
*/ */
#define MAX_AREA_SIZE 16384 #define CACHE_DLIMIT 16384
/* /*
* the cache line size of the I and D cache * the cache line size of the I and D cache
*/ */
#define DCACHELINESIZE 32 #define CACHE_DLINESIZE 32
#define ICACHELINESIZE 32
/*
* and the page size
*/
#define PAGESIZE 4096
.text .text
/* /*
* cpu_arm926_check_bugs() * cpu_arm926_check_bugs()
*/ */
@@ -77,17 +71,17 @@ ENTRY(cpu_arm926_proc_fin)
stmfd sp!, {lr} stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip msr cpsr_c, ip
bl cpu_arm926_cache_clean_invalidate_all bl arm926_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca. bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc} ldmfd sp!, {pc}
/* /*
* cpu_arm926_reset(loc) * cpu_arm926_reset(loc)
* *
* Perform a soft reset of the system. Put the CPU into the * Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch * same state as it would be if it had been reset, and branch
* to what would be the reset vector. * to what would be the reset vector.
* *
@@ -100,229 +94,223 @@ ENTRY(cpu_arm926_reset)
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........ bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0 mov pc, r0
/* /*
* cpu_arm926_do_idle() * cpu_arm926_do_idle()
*
* Called with IRQs disabled
*/ */
.align 5 .align 10
ENTRY(cpu_arm926_do_idle) ENTRY(cpu_arm926_do_idle)
mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bic r2, r1, #1 << 12
mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
mov pc, lr mov pc, lr
/* ================================= CACHE ================================ */
/* /*
* cpu_arm926_cache_clean_invalidate_all() * flush_user_cache_all()
* *
* clean and invalidate all cache lines * Clean and invalidate all cache entries in a particular
* address space.
*/
ENTRY(arm926_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
* *
* Note: * Clean and invalidate the entire cache.
* 1. we should preserve r0 at all times
*/ */
.align 5 ENTRY(arm926_flush_kern_cache_all)
ENTRY(cpu_arm926_cache_clean_invalidate_all) mov r2, #VM_EXEC
mov r2, #1
cpu_arm926_cache_clean_invalidate_all_r2:
mov ip, #0 mov ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else #else
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
bne 1b bne 1b
#endif #endif
teq r2, #0 tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm926_cache_clean_invalidate_range(start, end, flags) * flush_user_cache_range(start, end, flags)
* *
* clean and invalidate all cache lines associated with this area of memory * Clean and invalidate a range of cache entries in the
* specified address range.
* *
* This is a little misleading, it is not intended to clean out * - start - start address (inclusive)
* the i-cache but to make sure that any data written to the * - end - end address (exclusive)
* range is made consistent. This means that when we execute code * - flags - vm_flags describing address space
* in that region, everything works as we expect.
*
* This generally means writing back data in the Dcache and
* write buffer and flushing the Icache over that region
* start: Area start address
* end: Area end address
* flags: nonzero for I cache as well
*/ */
.align 5 ENTRY(arm926_flush_user_cache_range)
ENTRY(cpu_arm926_cache_clean_invalidate_range) mov ip, #0
bic r0, r0, #DCACHELINESIZE - 1 @ && added by PGM sub r3, r1, r0 @ calculate total size
bic r1, r1, #DCACHELINESIZE - 1 @ && added by DHM cmp r3, #CACHE_DLIMIT
sub r3, r1, r0 bgt __flush_whole_cache
cmp r3, #MAX_AREA_SIZE 1: tst r2, #VM_EXEC
bgt cpu_arm926_cache_clean_invalidate_all_r2
1: teq r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
#else #else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
#endif #endif
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
tst r2, #VM_EXEC
mcr p15, 0, r1, c7, c10, 4 @ drain WB mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
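Like the ARM922 version, this routine compares the size of the range against CACHE_DLIMIT (16384 bytes per the definition above) and falls back to __flush_whole_cache when per-line maintenance would cost more than flushing everything. A rough C rendering of that decision, with the cache operations reduced to stubs and the VM_EXEC value assumed:

/* Sketch only; constants follow the #defines above for the ARM926. */
#define CACHE_DLIMIT    16384
#define CACHE_DLINESIZE 32
#define VM_EXEC_FLAG    0x4     /* stand-in for VM_EXEC */

static void flush_whole_cache_sk(int exec)      { (void)exec; }
static void clean_inv_dline_sk(unsigned long a) { (void)a;    }
static void inv_icache_line_sk(unsigned long a) { (void)a;    }
static void drain_wb_sk(void)                   { }

static void flush_user_range_sketch(unsigned long start, unsigned long end,
                                    unsigned int vm_flags)
{
        if (end - start > CACHE_DLIMIT) {
                flush_whole_cache_sk(vm_flags & VM_EXEC_FLAG);
                return;
        }
        for (; start < end; start += CACHE_DLINESIZE) {
                clean_inv_dline_sk(start);
                if (vm_flags & VM_EXEC_FLAG)
                        inv_icache_line_sk(start);
        }
        if (vm_flags & VM_EXEC_FLAG)
                drain_wb_sk();
}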
/* ================================ D-CACHE =============================== */
/* /*
* cpu_arm926_dcache_invalidate_range(start, end) * coherent_kern_range(start, end)
* *
* throw away all D-cached data in specified region without an obligation * Ensure coherency between the Icache and the Dcache in the
* to write them back. Note however that we must clean the D-cached entries * region described by start, end. If you have non-snooping
* around the boundaries if the start and/or end address are not cache * Harvard caches, you need to implement this function.
* aligned.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(arm926_coherent_kern_range)
ENTRY(cpu_arm926_dcache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
tst r0, #DCACHELINESIZE - 1 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE
tst r1, #DCACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1
#endif @ clean D entry
bic r0, r0, #DCACHELINESIZE - 1
bic r1, r1, #DCACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1 cmp r0, r1
blt 1b blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
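coherent_kern_range is what makes freshly written instructions visible on these Harvard caches: each line is cleaned out of the D-cache so memory holds the new code, then the matching I-cache line is invalidated so stale instructions get refetched. A minimal sketch of that per-line pairing, assuming the 32-byte line size defined above (the helpers are placeholders for the two coprocessor operations):

static void clean_dcache_line_sk(unsigned long a) { (void)a; /* c7, c10, 1 */ }
static void inv_icache_line_sk2(unsigned long a)  { (void)a; /* c7, c5, 1  */ }

static void coherent_range_sketch(unsigned long start, unsigned long end)
{
        for (start &= ~31UL; start < end; start += 32) {
                clean_dcache_line_sk(start);    /* push new code to memory   */
                inv_icache_line_sk2(start);     /* force instruction refetch */
        }
}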
/* /*
* cpu_arm926_dcache_clean_range(start, end) * flush_kern_dcache_page(void *page)
* *
* For the specified virtual address range, ensure that all caches contain * Ensure no D cache aliasing occurs, either with itself or
* clean data, such that peripheral accesses to the physical RAM fetch * the I cache
* correct data.
* *
* start: virtual start address * - addr - page aligned address
* end: virtual end address
*/ */
.align 5 ENTRY(arm926_flush_kern_dcache_page)
ENTRY(cpu_arm926_dcache_clean_range) add r1, r0, #PAGE_SZ
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
bic r0, r0, #DCACHELINESIZE - 1 add r0, r0, #CACHE_DLINESIZE
sub r1, r1, r0 cmp r0, r1
cmp r1, #MAX_AREA_SIZE blo 1b
mov r2, #0 mov r0, #0
bgt cpu_arm926_cache_clean_invalidate_all_r2 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
bic r1, r1, #DCACHELINESIZE -1
add r1, r1, #DCACHELINESIZE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bpl 1b
#endif
mcr p15, 0, r2, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm926_dcache_clean_page(page) * dma_inv_range(start, end)
* *
* Cleans a single page of dcache so that if we have any future aliased * Invalidate (discard) the specified virtual address range.
* mappings, they will be consistent at the time that they are created. * May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* *
* page: virtual address of page to clean from dcache * - start - virtual start address
* - end - virtual end address
* *
* Note: * (same as v4wb)
* 1. we don't need to flush the write buffer in this case.
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/ */
.align 5 ENTRY(arm926_dma_inv_range)
ENTRY(cpu_arm926_dcache_clean_page)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r1, #PAGESIZE tst r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE tst r1, #CACHE_DLINESIZE - 1
mcr p15, 0, r0, c7, c10, 1 @ clean D entry mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #2 * DCACHELINESIZE
bne 1b
#endif #endif
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* /*
* cpu_arm926_dcache_clean_entry(addr) * dma_clean_range(start, end)
* *
* Clean the specified entry of any caches such that the MMU * Clean the specified virtual address range.
* translation fetches will obtain correct data.
* *
* addr: cache-unaligned virtual address * - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/ */
.align 5 ENTRY(arm926_dma_clean_range)
ENTRY(cpu_arm926_dcache_clean_entry)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif #endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
/* ================================ I-CACHE =============================== */
/* /*
* cpu_arm926_icache_invalidate_range(start, end) * dma_flush_range(start, end)
* *
* invalidate a range of virtual addresses from the Icache * Clean and invalidate the specified virtual address range.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(arm926_dma_flush_range)
ENTRY(cpu_arm926_icache_invalidate_range) bic r0, r0, #CACHE_DLINESIZE - 1
bic r0, r0, #DCACHELINESIZE - 1 @ Safety check 1:
sub r1, r1, r0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
cmp r1, #MAX_AREA_SIZE mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
bgt cpu_arm926_cache_clean_invalidate_all_r2 #else
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
bic r1, r1, #DCACHELINESIZE - 1 #endif
add r1, r1, #DCACHELINESIZE add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
1: mcr p15, 0, r0, c7, c5, 1 @ clean I entries blo 1b
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bne 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain WB mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
ENTRY(cpu_arm926_icache_invalidate_page) ENTRY(arm926_cache_fns)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache .long arm926_flush_kern_cache_all
.long arm926_flush_user_cache_all
.long arm926_flush_user_cache_range
.long arm926_coherent_kern_range
.long arm926_flush_kern_dcache_page
.long arm926_dma_inv_range
.long arm926_dma_clean_range
.long arm926_dma_flush_range
ENTRY(cpu_arm926_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr mov pc, lr
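The new dcache_clean_area entry point in the processor function table takes a start address and a size rather than a start/end pair and simply cleans whole lines until the size is exhausted, replacing the older per-range, per-page and per-entry clean hooks. A hedged sketch of that assumed (addr, size) contract, with the line clean reduced to a stub:

static void clean_dline_sk(unsigned long a) { (void)a; }

static void dcache_clean_area_sketch(unsigned long addr, long size)
{
        while (size > 0) {
                clean_dline_sk(addr);
                addr += 32;     /* CACHE_DLINESIZE */
                size -= 32;
        }
        /* the assembly then drains the write buffer */
}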
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
/* /*
@@ -336,7 +324,6 @@ ENTRY(cpu_arm926_icache_invalidate_page)
ENTRY(cpu_arm926_set_pgd) ENTRY(cpu_arm926_set_pgd)
mov ip, #0 mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
/* Any reason why we don't use mcr p15, 0, r0, c7, c7, 0 here? --rmk */
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else #else
@ && 'Clean & Invalidate whole DCache' @ && 'Clean & Invalidate whole DCache'
@@ -364,7 +351,7 @@ ENTRY(cpu_arm926_set_pte)
bic r2, r2, #3 bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL orr r2, r2, #HPTE_TYPE_SMALL
tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec? tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty? tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
@@ -431,21 +418,21 @@ __arm926_setup:
bic r0, r0, #0x0e00 bic r0, r0, #0x0e00
bic r0, r0, #0x0002 bic r0, r0, #0x0002
bic r0, r0, #0x000c bic r0, r0, #0x000c
bic r0, r0, #0x1000 @ ...0 000. .... 000. bic r0, r0, #0x1000 @ ...0 000. .... 000.
/* /*
* Turn on what we want * Turn on what we want
*/ */
orr r0, r0, #0x0031 orr r0, r0, #0x0031
orr r0, r0, #0x2100 @ ..1. ...1 ..11 ...1 orr r0, r0, #0x2100 @ ..1. ...1 ..11 ...1
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .1.. .... .... .... orr r0, r0, #0x4000 @ .1.. .... .... ....
#endif #endif
#ifndef CONFIG_CPU_DCACHE_DISABLE #ifndef CONFIG_CPU_DCACHE_DISABLE
orr r0, r0, #0x0004 @ .... .... .... .1.. orr r0, r0, #0x0004 @ .... .... .... .1..
#endif #endif
#ifndef CONFIG_CPU_ICACHE_DISABLE #ifndef CONFIG_CPU_ICACHE_DISABLE
orr r0, r0, #0x1000 @ ...1 .... .... .... orr r0, r0, #0x1000 @ ...1 .... .... ....
#endif #endif
mov pc, lr mov pc, lr
@@ -463,25 +450,9 @@ arm926_processor_functions:
.word cpu_arm926_proc_fin .word cpu_arm926_proc_fin
.word cpu_arm926_reset .word cpu_arm926_reset
.word cpu_arm926_do_idle .word cpu_arm926_do_idle
.word cpu_arm926_dcache_clean_area
/* cache */
.word cpu_arm926_cache_clean_invalidate_all
.word cpu_arm926_cache_clean_invalidate_range
/* dcache */
.word cpu_arm926_dcache_invalidate_range
.word cpu_arm926_dcache_clean_range
.word cpu_arm926_dcache_clean_page
.word cpu_arm926_dcache_clean_entry
/* icache */
.word cpu_arm926_icache_invalidate_range
.word cpu_arm926_icache_invalidate_page
/* pgtable */
.word cpu_arm926_set_pgd .word cpu_arm926_set_pgd
.word cpu_arm926_set_pte .word cpu_arm926_set_pte
.size arm926_processor_functions, . - arm926_processor_functions .size arm926_processor_functions, . - arm926_processor_functions
.type cpu_arch_name, #object .type cpu_arch_name, #object
@@ -505,10 +476,10 @@ __arm926_proc_info:
b __arm926_setup b __arm926_setup
.long cpu_arch_name .long cpu_arch_name
.long cpu_elf_name .long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | \ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | HWCAP_JAVA
HWCAP_FAST_MULT | HWCAP_JAVA
.long cpu_arm926_name .long cpu_arm926_name
.long arm926_processor_functions .long arm926_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long v4wb_user_fns .long v4wb_user_fns
.long arm926_cache_fns
.size __arm926_proc_info, . - __arm926_proc_info .size __arm926_proc_info, . - __arm926_proc_info
@@ -10,12 +10,7 @@
* MMU functions for SA110 * MMU functions for SA110
* *
* These are the low level assembler for performing cache and TLB * These are the low level assembler for performing cache and TLB
* functions on the StrongARM-110, StrongARM-1100 and StrongARM-1110. * functions on the StrongARM-110.
*
* Note that SA1100 and SA1110 share everything but their name and CPU ID.
*
* 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
* Flush the read buffer at context switches
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
@@ -25,61 +20,32 @@
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/proc/pgtable.h> #include <asm/proc/pgtable.h>
/* This is the maximum size of an area which will be flushed. If the area
* is larger than this, then we flush the whole cache
*/
#define MAX_AREA_SIZE 32768
/* /*
* the cache line size of the I and D cache * the cache line size of the I and D cache
*/ */
#define DCACHELINESIZE 32 #define DCACHELINESIZE 32
/*
* and the page size
*/
#define PAGESIZE 4096
#define FLUSH_OFFSET 32768 #define FLUSH_OFFSET 32768
.macro flush_110_dcache rd, ra, re .macro flush_110_dcache rd, ra, re
ldr \rd, =flush_base ldr \rd, =flush_base
ldr \ra, [\rd] ldr \ra, [\rd]
eor \ra, \ra, #FLUSH_OFFSET eor \ra, \ra, #FLUSH_OFFSET
str \ra, [\rd] str \ra, [\rd]
add \re, \ra, #16384 @ only necessary for 16k add \re, \ra, #16384 @ only necessary for 16k
1001: ldr \rd, [\ra], #DCACHELINESIZE 1001: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra teq \re, \ra
bne 1001b bne 1001b
.endm .endm
.macro flush_1100_dcache rd, ra, re .data
ldr \rd, =flush_base flush_base:
ldr \ra, [\rd] .long FLUSH_BASE
eor \ra, \ra, #FLUSH_OFFSET .text
str \ra, [\rd]
add \re, \ra, #8192 @ only necessary for 8k
1001: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1001b
#ifdef FLUSH_BASE_MINICACHE
add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
add \re, \ra, #512 @ only 512 bytes
1002: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1002b
#endif
.endm
.data
flush_base: .long FLUSH_BASE
.text
/* /*
* cpu_sa110_check_bugs() * cpu_sa110_check_bugs()
*/ */
ENTRY(cpu_sa110_check_bugs) ENTRY(cpu_sa110_check_bugs)
ENTRY(cpu_sa1100_check_bugs)
mrs ip, cpsr mrs ip, cpsr
bic ip, ip, #PSR_F_BIT bic ip, ip, #PSR_F_BIT
msr cpsr, ip msr cpsr, ip
@@ -89,7 +55,6 @@ ENTRY(cpu_sa1100_check_bugs)
* cpu_sa110_proc_init() * cpu_sa110_proc_init()
*/ */
ENTRY(cpu_sa110_proc_init) ENTRY(cpu_sa110_proc_init)
ENTRY(cpu_sa1100_proc_init)
mov r0, #0 mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mov pc, lr mov pc, lr
@@ -101,7 +66,7 @@ ENTRY(cpu_sa110_proc_fin)
stmfd sp!, {lr} stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip msr cpsr_c, ip
bl cpu_sa110_cache_clean_invalidate_all @ clean caches bl v4wb_flush_kern_cache_all @ clean caches
1: mov r0, #0 1: mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
@@ -110,13 +75,6 @@ ENTRY(cpu_sa110_proc_fin)
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc} ldmfd sp!, {pc}
ENTRY(cpu_sa1100_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl cpu_sa1100_cache_clean_invalidate_all @ clean caches
b 1b
/* /*
* cpu_sa110_reset(loc) * cpu_sa110_reset(loc)
* *
@@ -128,7 +86,6 @@ ENTRY(cpu_sa1100_proc_fin)
*/ */
.align 5 .align 5
ENTRY(cpu_sa110_reset) ENTRY(cpu_sa110_reset)
ENTRY(cpu_sa1100_reset)
mov ip, #0 mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -151,204 +108,25 @@ ENTRY(cpu_sa1100_reset)
* 3 = switch to fast processor clock * 3 = switch to fast processor clock
*/ */
.align 5 .align 5
idle: mcr p15, 0, r0, c15, c8, 2 @ Wait for interrupt, cache aligned
mov r0, r0 @ safety
mov pc, lr
ENTRY(cpu_sa110_do_idle) ENTRY(cpu_sa110_do_idle)
mov ip, #0
cmp r0, #4
addcc pc, pc, r0, lsl #2
mov pc, lr
b idle
b idle
b slow_clock
b fast_clock
fast_clock:
mcr p15, 0, ip, c15, c1, 2 @ enable clock switching
mov pc, lr
slow_clock:
mcr p15, 0, ip, c15, c2, 2 @ disable clock switching mcr p15, 0, ip, c15, c2, 2 @ disable clock switching
ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc
ldr r1, [r1, #0] @ force switch to MCLK ldr r1, [r1, #0] @ force switch to MCLK
mov pc, lr mov r0, r0 @ safety
mov r0, r0 @ safety
.align 5 mov r0, r0 @ safety
ENTRY(cpu_sa1100_do_idle) mcr p15, 0, r0, c15, c8, 2 @ Wait for interrupt, cache aligned
mov r0, r0 @ 4 nop padding mov r0, r0 @ safety
mov r0, r0 mov r0, r0 @ safety
mov r0, r0
mov r0, #0
ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
mrs r2, cpsr
orr r3, r2, #192 @ disallow interrupts
msr cpsr_c, r3
@ --- aligned to a cache line
mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
ldr r1, [r1, #0] @ force switch to MCLK
mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
mov r0, r0 @ safety mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
msr cpsr_c, r2 @ allow interrupts
mov pc, lr mov pc, lr
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
/*
* cpu_sa110_cache_clean_invalidate_all (void)
*
* clean and invalidate all cache lines
*
* Note:
* 1. we should preserve r0 at all times
*/
.align 5
ENTRY(cpu_sa110_cache_clean_invalidate_all)
mov r2, #1
cpu_sa110_cache_clean_invalidate_all_r2:
flush_110_dcache r3, ip, r1
mov ip, #0
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
.align 5
ENTRY(cpu_sa1100_cache_clean_invalidate_all)
mov r2, #1
cpu_sa1100_cache_clean_invalidate_all_r2:
flush_1100_dcache r3, ip, r1
mov ip, #0
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* cpu_sa110_cache_clean_invalidate_range(start, end, flags)
*
* clean and invalidate all cache lines associated with this area of memory
*
* start: Area start address
* end: Area end address
* flags: nonzero for I cache as well
*/
.align 5
ENTRY(cpu_sa110_cache_clean_invalidate_range)
bic r0, r0, #DCACHELINESIZE - 1
sub r3, r1, r0
cmp r3, #MAX_AREA_SIZE
bhi cpu_sa110_cache_clean_invalidate_all_r2
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1
blo 1b
teq r2, #0
movne r0, #0
mcrne p15, 0, r0, c7, c5, 0 @ invalidate I cache
mov pc, lr
ENTRY(cpu_sa1100_cache_clean_invalidate_range)
sub r3, r1, r0
cmp r3, #MAX_AREA_SIZE
bhi cpu_sa1100_cache_clean_invalidate_all_r2
b 1b
/* ================================ D-CACHE =============================== */
/*
* cpu_sa110_dcache_invalidate_range(start, end)
*
* throw away all D-cached data in specified region without an obligation
* to write them back. Note however that we must clean the D-cached entries
* around the boundaries if the start and/or end address are not cache
* aligned.
*
* start: virtual start address
* end: virtual end address
*/
.align 5
ENTRY(cpu_sa110_dcache_invalidate_range)
ENTRY(cpu_sa1100_dcache_invalidate_range)
tst r0, #DCACHELINESIZE - 1
bic r0, r0, #DCACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #DCACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #DCACHELINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* cpu_sa110_dcache_clean_range(start, end)
*
* For the specified virtual address range, ensure that all caches contain
* clean data, such that peripheral accesses to the physical RAM fetch
* correct data.
*
* start: virtual start address
* end: virtual end address
*/
.align 5
ENTRY(cpu_sa110_dcache_clean_range)
bic r0, r0, #DCACHELINESIZE - 1
sub r1, r1, r0
cmp r1, #MAX_AREA_SIZE
mov r2, #0
bhi cpu_sa110_cache_clean_invalidate_all_r2
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #2 * DCACHELINESIZE
bpl 1b
mcr p15, 0, r2, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(cpu_sa1100_dcache_clean_range)
bic r0, r0, #DCACHELINESIZE - 1
sub r1, r1, r0
cmp r1, #MAX_AREA_SIZE
mov r2, #0
bhi cpu_sa1100_cache_clean_invalidate_all_r2
b 1b
/* /*
* cpu_sa110_clean_dcache_page(page) * cpu_sa110_dcache_clean_area(addr,sz)
*
* Cleans a single page of dcache so that if we have any future aliased
* mappings, they will be consistent at the time that they are created.
*
* Note:
* 1. we don't need to flush the write buffer in this case.
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/
.align 5
ENTRY(cpu_sa110_dcache_clean_page)
ENTRY(cpu_sa1100_dcache_clean_page)
mov r1, #PAGESIZE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #2 * DCACHELINESIZE
bne 1b
mov pc, lr
/*
* cpu_sa110_dcache_clean_entry(addr)
* *
* Clean the specified entry of any caches such that the MMU * Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data. * translation fetches will obtain correct data.
@@ -356,35 +134,11 @@ ENTRY(cpu_sa1100_dcache_clean_page)
* addr: cache-unaligned virtual address * addr: cache-unaligned virtual address
*/ */
.align 5 .align 5
ENTRY(cpu_sa110_dcache_clean_entry) ENTRY(cpu_sa110_dcache_clean_area)
ENTRY(cpu_sa1100_dcache_clean_entry) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* ================================ I-CACHE =============================== */
/*
* cpu_sa110_icache_invalidate_range(start, end)
*
* invalidate a range of virtual addresses from the Icache
*
* start: virtual start address
* end: virtual end address
*/
.align 5
ENTRY(cpu_sa110_icache_invalidate_range)
ENTRY(cpu_sa1100_icache_invalidate_range)
bic r0, r0, #DCACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
add r0, r0, #DCACHELINESIZE add r0, r0, #DCACHELINESIZE
cmp r0, r1 subs r1, r1, #DCACHELINESIZE
blo 1b bhi 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ENTRY(cpu_sa110_icache_invalidate_page)
ENTRY(cpu_sa1100_icache_invalidate_page)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mov pc, lr mov pc, lr
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
@@ -406,24 +160,6 @@ ENTRY(cpu_sa110_set_pgd)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr mov pc, lr
/*
* cpu_sa1100_set_pgd(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa1100_set_pgd)
flush_1100_dcache r3, ip, r1
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/* /*
* cpu_sa110_set_pte(ptep, pte) * cpu_sa110_set_pte(ptep, pte)
* *
@@ -431,9 +167,6 @@ ENTRY(cpu_sa1100_set_pgd)
*/ */
.align 5 .align 5
ENTRY(cpu_sa110_set_pte) ENTRY(cpu_sa110_set_pte)
ENTRY(cpu_sa1100_set_pte)
tst r0, #2048
streq r0, [r0, -r0] @ BUG_ON
str r1, [r0], #-2048 @ linux version str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
@@ -442,7 +175,7 @@ ENTRY(cpu_sa1100_set_pte)
bic r2, r2, #3 bic r2, r2, #3
orr r2, r2, #PTE_TYPE_SMALL orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER | L_PTE_EXEC @ User or Exec? tst r1, #L_PTE_USER @ User or Exec?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
@@ -460,30 +193,16 @@ ENTRY(cpu_sa1100_set_pte)
cpu_sa110_name: cpu_sa110_name:
.asciz "StrongARM-110" .asciz "StrongARM-110"
cpu_sa1100_name:
.asciz "StrongARM-1100"
cpu_sa1110_name:
.asciz "StrongARM-1110"
.align .align
__INIT __INIT
__sa1100_setup: @ Allow read-buffer operations from userland
mcr p15, 0, r0, c9, c0, 5
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ .... 000. .... ..0.
orr r0, r0, #0x003d
orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
b __setup_common
__sa110_setup: __sa110_setup:
mrc p15, 0, r0, c1, c0 @ get control register v4 mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x2e00 @ ..VI ZFRS BLDP WCAM bic r0, r0, #0x2e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ ..0. 000. .... ..0. bic r0, r0, #0x0002 @ ..0. 000. .... ..0.
orr r0, r0, #0x003d orr r0, r0, #0x003d
orr r0, r0, #0x1100 @ ...1 ...1 ..11 11.1 orr r0, r0, #0x1100 @ ...1 ...1 ..11 11.1
__setup_common:
mov r10, #0 mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4 mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4 mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
@@ -509,19 +228,8 @@ ENTRY(sa110_processor_functions)
.word cpu_sa110_reset .word cpu_sa110_reset
.word cpu_sa110_do_idle .word cpu_sa110_do_idle
/* cache */
.word cpu_sa110_cache_clean_invalidate_all
.word cpu_sa110_cache_clean_invalidate_range
/* dcache */ /* dcache */
.word cpu_sa110_dcache_invalidate_range .word cpu_sa110_dcache_clean_area
.word cpu_sa110_dcache_clean_range
.word cpu_sa110_dcache_clean_page
.word cpu_sa110_dcache_clean_entry
/* icache */
.word cpu_sa110_icache_invalidate_range
.word cpu_sa110_icache_invalidate_page
/* pgtable */ /* pgtable */
.word cpu_sa110_set_pgd .word cpu_sa110_set_pgd
@@ -529,38 +237,6 @@ ENTRY(sa110_processor_functions)
.size sa110_processor_functions, . - sa110_processor_functions .size sa110_processor_functions, . - sa110_processor_functions
/*
* SA1100 and SA1110 share the same function calls
*/
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
.word cpu_sa1100_check_bugs
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
.word cpu_sa1100_do_idle
/* cache */
.word cpu_sa1100_cache_clean_invalidate_all
.word cpu_sa1100_cache_clean_invalidate_range
/* dcache */
.word cpu_sa1100_dcache_invalidate_range
.word cpu_sa1100_dcache_clean_range
.word cpu_sa1100_dcache_clean_page
.word cpu_sa1100_dcache_clean_entry
/* icache */
.word cpu_sa1100_icache_invalidate_range
.word cpu_sa1100_icache_invalidate_page
/* pgtable */
.word cpu_sa1100_set_pgd
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
.type cpu_arch_name, #object .type cpu_arch_name, #object
cpu_arch_name: cpu_arch_name:
.asciz "armv4" .asciz "armv4"
@@ -574,7 +250,6 @@ cpu_elf_name:
.section ".proc.info", #alloc, #execinstr .section ".proc.info", #alloc, #execinstr
#ifdef CONFIG_CPU_SA110
.type __sa110_proc_info,#object .type __sa110_proc_info,#object
__sa110_proc_info: __sa110_proc_info:
.long 0x4401a100 .long 0x4401a100
@@ -588,37 +263,5 @@ __sa110_proc_info:
.long sa110_processor_functions .long sa110_processor_functions
.long v4wb_tlb_fns .long v4wb_tlb_fns
.long v4wb_user_fns .long v4wb_user_fns
.long v4wb_cache_fns
.size __sa110_proc_info, . - __sa110_proc_info .size __sa110_proc_info, . - __sa110_proc_info
#endif
#ifdef CONFIG_CPU_SA1100
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1100_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.size __sa1100_proc_info, . - __sa1100_proc_info
.type __sa1110_proc_info,#object
__sa1110_proc_info:
.long 0x6901b110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1110_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
#endif
/*
* linux/arch/arm/mm/proc-sa1100.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for SA110
*
* These are the low level assembler for performing cache and TLB
* functions on the StrongARM-1100 and StrongARM-1110.
*
* Note that SA1100 and SA1110 share everything but their name and CPU ID.
*
* 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
* Flush the read buffer at context switches
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
/*
* the cache line size of the I and D cache
*/
#define DCACHELINESIZE 32
#define FLUSH_OFFSET 32768
.macro flush_1100_dcache rd, ra, re
ldr \rd, =flush_base
ldr \ra, [\rd]
eor \ra, \ra, #FLUSH_OFFSET
str \ra, [\rd]
add \re, \ra, #8192 @ only necessary for 8k
1001: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1001b
#ifdef FLUSH_BASE_MINICACHE
add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
add \re, \ra, #512 @ only 512 bytes
1002: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1002b
#endif
.endm
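The StrongARM cannot clean its whole D-cache with a single operation, so flush_1100_dcache cleans by displacement: it reads one word per line across an 8 KB window of a reserved flush area (alternating between two halves via FLUSH_OFFSET so consecutive flushes do not hit lines that are already resident), which evicts and therefore writes back every dirty line, with an optional 512-byte pass for the minicache. A C sketch of the displacement idea; the mapping address and helper are assumptions, only the sizes come from the macro above:

#define FLUSH_AREA   ((volatile unsigned char *)0xf5000000)  /* assumed mapping */
#define FLUSH_OFF    32768
#define DCACHE_SIZE  8192
#define LINE_SIZE    32

static unsigned long flush_toggle;

static void flush_dcache_by_reading(void)
{
        volatile unsigned char *p = FLUSH_AREA + (flush_toggle ^= FLUSH_OFF);
        unsigned int i;

        /* Reading one byte per line allocates that line, evicting (and
         * writing back) whatever dirty data previously occupied it. */
        for (i = 0; i < DCACHE_SIZE; i += LINE_SIZE)
                (void)p[i];
}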
.data
flush_base:
.long FLUSH_BASE
.text
/*
* cpu_sa1100_check_bugs()
*/
ENTRY(cpu_sa1100_check_bugs)
mrs ip, cpsr
bic ip, ip, #PSR_F_BIT
msr cpsr, ip
mov pc, lr
__INIT
/*
* cpu_sa1100_proc_init()
*/
ENTRY(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
.previous
/*
* cpu_sa1100_proc_fin()
*
* Prepare the CPU for reset:
* - Disable interrupts
* - Clean and turn off caches.
*/
ENTRY(cpu_sa1100_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
flush_1100_dcache r0, r1, r2 @ clean caches
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_sa1100_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_sa1100_do_idle(type)
*
* Cause the processor to idle
*
* type: call type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*/
.align 5
ENTRY(cpu_sa1100_do_idle)
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, #0
ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
@ --- aligned to a cache line
mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
ldr r1, [r1, #0] @ force switch to MCLK
mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
mov pc, lr
/* ================================= CACHE ================================ */
/*
* cpu_sa1100_dcache_clean_area(addr,sz)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_sa1100_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_sa1100_set_pgd(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa1100_set_pgd)
flush_1100_dcache r3, ip, r1
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_sa1100_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_sa1100_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User or Exec?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
str r2, [r0] @ hardware version
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
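cpu_sa1100_set_pte stores the Linux view of the PTE at ptep and derives the hardware small-page descriptor 2048 bytes below it: user pages get user-read access, write permission is granted only when the entry is both writable and dirty (so the first write to a clean page faults and the kernel can mark it dirty), and an entry that is not both present and young is written as zero so the resulting fault can emulate the accessed bit. A loose C restatement of that translation; every constant below is a stand-in, not the real L_PTE_*/PTE_* value:

#define L_PRESENT  (1u << 0)
#define L_YOUNG    (1u << 1)
#define L_USER     (1u << 4)
#define L_WRITE    (1u << 5)
#define L_DIRTY    (1u << 6)
#define HW_TYPE_SMALL    0x2u
#define HW_AP_USER_RO    0xaa0u   /* user read, supervisor write */
#define HW_AP_SVC_WRITE  0x550u   /* adds write permission       */

static unsigned int linux_pte_to_hw_sketch(unsigned int lpte)
{
        unsigned int hw = (lpte & ~0xff3u) | HW_TYPE_SMALL;

        if (lpte & L_USER)
                hw |= HW_AP_USER_RO;
        if ((lpte & (L_WRITE | L_DIRTY)) == (L_WRITE | L_DIRTY))
                hw |= HW_AP_SVC_WRITE;          /* writable only once dirty */
        if ((lpte & (L_PRESENT | L_YOUNG)) != (L_PRESENT | L_YOUNG))
                hw = 0;                         /* fault to emulate accessed bit */
        return hw;
}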
cpu_sa1100_name:
.asciz "StrongARM-1100"
cpu_sa1110_name:
.asciz "StrongARM-1110"
.align
__INIT
__sa1100_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mcr p15, 0, r4, c2, c0 @ load page table pointer
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ .... 000. .... ..0.
orr r0, r0, #0x003d
orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
mov pc, lr
.text
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
/*
* SA1100 and SA1110 share the same function calls
*/
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
.word cpu_sa1100_check_bugs
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
.word cpu_sa1100_do_idle
.word cpu_sa1100_dcache_clean_area
.word cpu_sa1100_set_pgd
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1100_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1100_proc_info, . - __sa1100_proc_info
.type __sa1110_proc_info,#object
__sa1110_proc_info:
.long 0x6901b110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1110_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
@@ -23,10 +23,11 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
#include <asm/hardware.h> #include <asm/hardware.h>
#include <asm/proc/pgtable.h> #include <asm/proc/pgtable.h>
#include <asm/page.h>
#include "proc-macros.S"
/* /*
* This is the maximum size of an area which will be flushed. If the area * This is the maximum size of an area which will be flushed. If the area
@@ -44,11 +45,6 @@
*/ */
#define CACHESIZE 32768 #define CACHESIZE 32768
/*
* and the page size
*/
#define PAGESIZE 4096
/* /*
* Virtual address used to allocate the cache when flushed * Virtual address used to allocate the cache when flushed
* *
@@ -135,11 +131,11 @@ ENTRY(cpu_xscale_proc_fin)
str lr, [sp, #-4]! str lr, [sp, #-4]!
mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r0 msr cpsr_c, r0
bl xscale_flush_kern_cache_all @ clean caches
mrc p15, 0, r0, c1, c0, 0 @ ctrl register mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA. bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches mcr p15, 0, r0, c1, c0, 0 @ disable caches
bl cpu_xscale_cache_clean_invalidate_all @ clean caches
ldr pc, [sp], #4 ldr pc, [sp], #4
/* /*
@@ -168,16 +164,10 @@ ENTRY(cpu_xscale_reset)
mov pc, r0 mov pc, r0
/* /*
* cpu_xscale_do_idle(type) * cpu_xscale_do_idle()
* *
* Cause the processor to idle * Cause the processor to idle
* *
* type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*
* For now we do nothing but go to idle mode for every case * For now we do nothing but go to idle mode for every case
* *
* XScale supports clock switching, but using idle mode support * XScale supports clock switching, but using idle mode support
@@ -193,226 +183,179 @@ ENTRY(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */ /* ================================= CACHE ================================ */
/* /*
* cpu_xscale_cache_clean_invalidate_all (void) * flush_user_cache_all()
* *
* clean and invalidate all cache lines * Invalidate all cache entries in a particular address
* space.
*/
ENTRY(xscale_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
* *
* Note: * Clean and invalidate the entire cache.
* 1. We should preserve r0 at all times.
* 2. Even if this function implies cache "invalidation" by its name,
* we don't need to actually use explicit invalidation operations
* since the goal is to discard all valid references from the cache
* and the cleaning of it already has that effect.
* 3. Because of 2 above and the fact that kernel space memory is always
* coherent across task switches there is no need to worry about
* inconsistencies due to interrupts, ence no irq disabling.
*/ */
.align 5 ENTRY(xscale_flush_kern_cache_all)
ENTRY(cpu_xscale_cache_clean_invalidate_all) mov r2, #VM_EXEC
mov r2, #1 mov ip, #0
cpu_xscale_cache_clean_invalidate_all_r2: __flush_whole_cache:
clean_d_cache r0, r1 clean_d_cache r0, r1
teq r2, #0 tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_cache_clean_invalidate_range(start, end, flags) * flush_user_cache_range(start, end, vm_flags)
* *
* clean and invalidate all cache lines associated with this area of memory * Invalidate a range of cache entries in the specified
* address space.
* *
* start: Area start address * - start - start address (may not be aligned)
* end: Area end address * - end - end address (exclusive, may not be aligned)
* flags: nonzero for I cache as well * - vma - vma_area_struct describing address space
*/ */
.align 5 .align 5
ENTRY(cpu_xscale_cache_clean_invalidate_range) ENTRY(xscale_flush_user_cache_range)
bic r0, r0, #CACHELINESIZE - 1 @ round down to cache line mov ip, #0
sub r3, r1, r0 sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE cmp r3, #MAX_AREA_SIZE
bhi cpu_xscale_cache_clean_invalidate_all_r2 bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
1: tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
teq r2, #0 tst r2, #VM_EXEC
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
moveq pc, lr mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
sub r0, r0, r3
1: mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c5, 6 @ Invalidate BTB
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_flush_ram_page(page) * coherent_kern_range(start, end)
* *
* clean all cache lines associated with this memory page * Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
* *
* page: page to clean * - start - virtual start address
* - end - virtual end address
*/ */
.align 5 ENTRY(xscale_coherent_kern_range)
ENTRY(cpu_xscale_flush_ram_page) bic r0, r0, #CACHELINESIZE - 1
mov r1, #PAGESIZE 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
subs r1, r1, #2 * CACHELINESIZE cmp r0, r1
bne 1b blo 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* ================================ D-CACHE =============================== */
/* /*
* cpu_xscale_dcache_invalidate_range(start, end) * flush_kern_dcache_page(void *page)
* *
* throw away all D-cached data in specified region without an obligation * Ensure no D cache aliasing occurs, either with itself or
* to write them back. Note however that on XScale we must clean all * the I cache
* entries also due to hardware errata (80200 A0 & A1 only).
* *
* start: virtual start address * - addr - page aligned address
* end: virtual end address
*/ */
.align 5 ENTRY(xscale_flush_kern_dcache_page)
ENTRY(cpu_xscale_dcache_invalidate_range) add r1, r0, #PAGE_SZ
mrc p15, 0, r2, c0, c0, 0 @ Read part no. 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
eor r2, r2, #0x69000000 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
eor r2, r2, #0x00052000 @ 80200 XX part no.
bics r2, r2, #0x1 @ Clear LSB in revision field
moveq r2, #0
beq cpu_xscale_cache_clean_invalidate_range @ An 80200 A0 or A1
tst r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ Clean D cache line
tst r1, #CACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ Clean D cache line
bic r0, r0, #CACHELINESIZE - 1 @ round down to cache line
1: mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_dcache_clean_range(start, end) * dma_inv_range(start, end)
* *
* For the specified virtual address range, ensure that all caches contain * Invalidate (discard) the specified virtual address range.
* clean data, such that peripheral accesses to the physical RAM fetch * May not write back any entries. If 'start' or 'end'
* correct data. * are not cache line aligned, those lines must be written
* back.
* *
* start: virtual start address * - start - virtual start address
* end: virtual end address * - end - virtual end address
*/ */
.align 5 ENTRY(xscale_dma_inv_range)
ENTRY(cpu_xscale_dcache_clean_range) mrc p15, 0, r2, c0, c0, 0 @ read ID
bic r0, r0, #CACHELINESIZE - 1 eor r2, r2, #0x69000000
sub r2, r1, r0 eor r2, r2, #0x00052000
cmp r2, #MAX_AREA_SIZE bics r2, r2, #1
movhi r2, #0 beq xscale_dma_flush_range
bhi cpu_xscale_cache_clean_invalidate_all_r2
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line tst r0, #CACHELINESIZE - 1
add r0, r0, #CACHELINESIZE bic r0, r0, #CACHELINESIZE - 1
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcr p15, 0, r0, c7, c10, 1 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* /*
* cpu_xscale_clean_dcache_page(page) * dma_clean_range(start, end)
* *
* Cleans a single page of dcache so that if we have any future aliased * Clean the specified virtual address range.
* mappings, they will be consistent at the time that they are created.
* *
* Note: * - start - virtual start address
* 1. we don't need to flush the write buffer in this case. [really? -Nico] * - end - virtual end address
* 2. we don't invalidate the entries since when we write the page
* out to disk, the entries may get reloaded into the cache.
*/ */
.align 5 ENTRY(xscale_dma_clean_range)
ENTRY(cpu_xscale_dcache_clean_page) bic r0, r0, #CACHELINESIZE - 1
mov r1, #PAGESIZE 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
subs r1, r1, #4 * CACHELINESIZE cmp r0, r1
bne 1b blo 1b
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer mcr p15, 0, r0, c7, c10, 1 @ Drain Write (& Fill) Buffer
mov pc, lr
/*
* cpu_xscale_dcache_clean_entry(addr)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_xscale_dcache_clean_entry)
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* ================================ I-CACHE =============================== */
/* /*
* cpu_xscale_icache_invalidate_range(start, end) * dma_flush_range(start, end)
*
* invalidate a range of virtual addresses from the Icache
* *
* start: virtual start address * Clean and invalidate the specified virtual address range.
* end: virtual end address
* *
* Note: This is vaguely defined as supposed to bring the dcache and the * - start - virtual start address
* icache in sync by the way this function is used. * - end - virtual end address
*/ */
.align 5 ENTRY(xscale_dma_flush_range)
ENTRY(cpu_xscale_icache_invalidate_range)
bic r0, r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
cmp r0, r1 cmp r0, r1
blo 1b blo 1b
mcr p15, 0, ip, c7, c5, 6 @ Invalidate BTB mcr p15, 0, r0, c7, c10, 1 @ Drain Write (& Fill) Buffer
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr mov pc, lr
/* ENTRY(xscale_cache_fns)
* cpu_xscale_icache_invalidate_page(page) .long xscale_flush_kern_cache_all
* .long xscale_flush_user_cache_all
* invalidate all Icache lines associated with this area of memory .long xscale_flush_user_cache_range
* .long xscale_coherent_kern_range
* page: page to invalidate .long xscale_flush_kern_dcache_page
*/ .long xscale_dma_inv_range
.align 5 .long xscale_dma_clean_range
ENTRY(cpu_xscale_icache_invalidate_page) .long xscale_dma_flush_range
mov r1, #PAGESIZE
1: mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line subs r1, r1, #CACHELINESIZE
add r0, r0, #CACHELINESIZE bhi 1b
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
add r0, r0, #CACHELINESIZE
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
add r0, r0, #CACHELINESIZE
subs r1, r1, #4 * CACHELINESIZE
bne 1b
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
mov pc, lr mov pc, lr
/* ================================ CACHE LOCKING============================ /* ================================ CACHE LOCKING============================
...@@ -553,7 +496,6 @@ ENTRY(xscale_dtlb_unlock) ...@@ -553,7 +496,6 @@ ENTRY(xscale_dtlb_unlock)
/* =============================== PageTable ============================== */ /* =============================== PageTable ============================== */
#define PMD_CACHE_WRITE_ALLOCATE 0
#define PTE_CACHE_WRITE_ALLOCATE 0 #define PTE_CACHE_WRITE_ALLOCATE 0
/* /*
...@@ -588,7 +530,7 @@ ENTRY(cpu_xscale_set_pte) ...@@ -588,7 +530,7 @@ ENTRY(cpu_xscale_set_pte)
eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
tst r3, #L_PTE_USER | L_PTE_EXEC @ User or Exec? tst r3, #L_PTE_USER @ User?
orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty? tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
...@@ -616,12 +558,10 @@ ENTRY(cpu_xscale_set_pte) ...@@ -616,12 +558,10 @@ ENTRY(cpu_xscale_set_pte)
@ Erratum 40: The B bit must be cleared for a user read-only @ Erratum 40: The B bit must be cleared for a user read-only
@ cacheable page. @ cacheable page.
@ @
@ B = B & ~((U|E) & C & ~W) @ B = B & ~(U & C & ~W)
@ @
and ip, r1, #L_PTE_USER | L_PTE_EXEC | L_PTE_WRITE | L_PTE_CACHEABLE and ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
teq ip, #L_PTE_USER | L_PTE_CACHEABLE teq ip, #L_PTE_USER | L_PTE_CACHEABLE
teqne ip, #L_PTE_EXEC | L_PTE_CACHEABLE
teqne ip, #L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE
biceq r2, r2, #PTE_BUFFERABLE biceq r2, r2, #PTE_BUFFERABLE
tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
...@@ -686,23 +626,7 @@ ENTRY(xscale_processor_functions) ...@@ -686,23 +626,7 @@ ENTRY(xscale_processor_functions)
.word cpu_xscale_proc_fin .word cpu_xscale_proc_fin
.word cpu_xscale_reset .word cpu_xscale_reset
.word cpu_xscale_do_idle .word cpu_xscale_do_idle
.word cpu_xscale_dcache_clean_area
/* cache */
.word cpu_xscale_cache_clean_invalidate_all
.word cpu_xscale_cache_clean_invalidate_range
.word cpu_xscale_flush_ram_page
/* dcache */
.word cpu_xscale_dcache_invalidate_range
.word cpu_xscale_dcache_clean_range
.word cpu_xscale_dcache_clean_page
.word cpu_xscale_dcache_clean_entry
/* icache */
.word cpu_xscale_icache_invalidate_range
.word cpu_xscale_icache_invalidate_page
/* pgtable */
.word cpu_xscale_set_pgd .word cpu_xscale_set_pgd
.word cpu_xscale_set_pte .word cpu_xscale_set_pte
.size xscale_processor_functions, . - xscale_processor_functions .size xscale_processor_functions, . - xscale_processor_functions
...@@ -733,6 +657,7 @@ __80200_proc_info: ...@@ -733,6 +657,7 @@ __80200_proc_info:
.long xscale_processor_functions .long xscale_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long xscale_mc_user_fns .long xscale_mc_user_fns
.long xscale_cache_fns
.size __80200_proc_info, . - __80200_proc_info .size __80200_proc_info, . - __80200_proc_info
.type __80321_proc_info,#object .type __80321_proc_info,#object
...@@ -764,6 +689,7 @@ __pxa250_proc_info: ...@@ -764,6 +689,7 @@ __pxa250_proc_info:
.long xscale_processor_functions .long xscale_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
.long xscale_mc_user_fns .long xscale_mc_user_fns
.long xscale_cache_fns
.size __pxa250_proc_info, . - __pxa250_proc_info .size __pxa250_proc_info, . - __pxa250_proc_info
.type __pxa210_proc_info,#object .type __pxa210_proc_info,#object
......
...@@ -46,85 +46,31 @@ extern struct processor { ...@@ -46,85 +46,31 @@ extern struct processor {
/* /*
* Processor architecture specific * Processor architecture specific
*/ */
struct { /* CACHE */ /*
/* * clean a virtual address range from the
* flush all caches * D-cache without flushing the cache.
*/ */
void (*clean_invalidate_all)(void); void (*dcache_clean_area)(void *addr, int size);
/*
* flush a specific page or pages
*/
void (*clean_invalidate_range)(unsigned long address, unsigned long end, int flags);
} cache;
struct { /* D-cache */
/*
* invalidate the specified data range
*/
void (*invalidate_range)(unsigned long start, unsigned long end);
/*
* clean specified data range
*/
void (*clean_range)(unsigned long start, unsigned long end);
/*
* obsolete flush cache entry
*/
void (*clean_page)(void *virt_page);
/*
* clean a virtual address range from the
* D-cache without flushing the cache.
*/
void (*clean_entry)(unsigned long start);
} dcache;
struct { /* I-cache */
/*
* invalidate the I-cache for the specified range
*/
void (*invalidate_range)(unsigned long start, unsigned long end);
/*
* invalidate the I-cache for the specified virtual page
*/
void (*invalidate_page)(void *virt_page);
} icache;
struct { /* PageTable */ /*
/* * Set the page table
* Set the page table */
*/ void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
void (*set_pgd)(unsigned long pgd_phys, struct mm_struct *mm); /*
/* * Set a PTE
* Set a PTE */
*/ void (*set_pte)(pte_t *ptep, pte_t pte);
void (*set_pte)(pte_t *ptep, pte_t pte);
} pgtable;
} processor; } processor;
extern const struct processor arm6_processor_functions; #define cpu_check_bugs() processor._check_bugs()
extern const struct processor arm7_processor_functions; #define cpu_proc_init() processor._proc_init()
extern const struct processor sa110_processor_functions; #define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr)
#define cpu_check_bugs() processor._check_bugs() #define cpu_do_idle() processor._do_idle()
#define cpu_proc_init() processor._proc_init() #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_proc_fin() processor._proc_fin() #define cpu_set_pte(ptep, pte) processor.set_pte(ptep, pte)
#define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle()
#define cpu_cache_clean_invalidate_all() processor.cache.clean_invalidate_all()
#define cpu_cache_clean_invalidate_range(s,e,f) processor.cache.clean_invalidate_range(s,e,f)
#define cpu_dcache_clean_page(vp) processor.dcache.clean_page(vp)
#define cpu_dcache_clean_entry(addr) processor.dcache.clean_entry(addr)
#define cpu_dcache_clean_range(s,e) processor.dcache.clean_range(s,e)
#define cpu_dcache_invalidate_range(s,e) processor.dcache.invalidate_range(s,e)
#define cpu_icache_invalidate_range(s,e) processor.icache.invalidate_range(s,e)
#define cpu_icache_invalidate_page(vp) processor.icache.invalidate_page(vp)
#define cpu_set_pgd(pgd,mm) processor.pgtable.set_pgd(pgd,mm)
#define cpu_set_pte(ptep, pte) processor.pgtable.set_pte(ptep, pte)
#define cpu_switch_mm(pgd,mm) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)),mm) #define cpu_switch_mm(pgd,mm) processor.switch_mm(__virt_to_phys((unsigned long)(pgd)),mm)
#define cpu_get_pgd() \ #define cpu_get_pgd() \
({ \ ({ \
......
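The slimmed-down struct processor above keeps only the per-CPU methods that are not cache maintenance (dcache_clean_area, switch_mm, set_pte), and the cpu_*() macros simply indirect through one instance of that struct, selected for the CPU that was detected at boot. The following is a minimal standalone sketch of that dispatch pattern only; the demo_* types and names are mocked up for illustration and are not the kernel's.

        #include <stdio.h>

        struct demo_processor {
                void (*dcache_clean_area)(void *addr, int size);
                void (*set_pte)(unsigned long *ptep, unsigned long pte);
        };

        static void demo_xscale_dcache_clean_area(void *addr, int size)
        {
                printf("clean %d bytes at %p\n", size, addr);
        }

        static void demo_xscale_set_pte(unsigned long *ptep, unsigned long pte)
        {
                *ptep = pte;    /* real code would also clean the D cache line */
        }

        /* one global instance, filled in once for the detected CPU */
        static struct demo_processor processor = {
                .dcache_clean_area = demo_xscale_dcache_clean_area,
                .set_pte           = demo_xscale_set_pte,
        };

        #define cpu_dcache_clean_area(a, s)  processor.dcache_clean_area(a, s)
        #define cpu_set_pte(p, v)            processor.set_pte(p, v)

        int main(void)
        {
                unsigned long pte = 0;
                char buf[64];

                cpu_dcache_clean_area((void *)buf, (int)sizeof(buf));
                cpu_set_pte(&pte, 0x1234);
                printf("pte now %#lx\n", pte);
                return 0;
        }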
...@@ -27,14 +27,7 @@ ...@@ -27,14 +27,7 @@
#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin) #define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
#define cpu_reset __cpu_fn(CPU_NAME,_reset) #define cpu_reset __cpu_fn(CPU_NAME,_reset)
#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle) #define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
#define cpu_cache_clean_invalidate_all __cpu_fn(CPU_NAME,_cache_clean_invalidate_all) #define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
#define cpu_cache_clean_invalidate_range __cpu_fn(CPU_NAME,_cache_clean_invalidate_range)
#define cpu_dcache_invalidate_range __cpu_fn(CPU_NAME,_dcache_invalidate_range)
#define cpu_dcache_clean_range __cpu_fn(CPU_NAME,_dcache_clean_range)
#define cpu_dcache_clean_page __cpu_fn(CPU_NAME,_dcache_clean_page)
#define cpu_dcache_clean_entry __cpu_fn(CPU_NAME,_dcache_clean_entry)
#define cpu_icache_invalidate_range __cpu_fn(CPU_NAME,_icache_invalidate_range)
#define cpu_icache_invalidate_page __cpu_fn(CPU_NAME,_icache_invalidate_page)
#define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd) #define cpu_set_pgd __cpu_fn(CPU_NAME,_set_pgd)
#define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte) #define cpu_set_pte __cpu_fn(CPU_NAME,_set_pte)
...@@ -46,23 +39,11 @@ ...@@ -46,23 +39,11 @@
struct mm_struct; struct mm_struct;
/* declare all the functions as extern */ /* declare all the functions as extern */
extern void cpu_data_abort(unsigned long pc);
extern void cpu_check_bugs(void); extern void cpu_check_bugs(void);
extern void cpu_proc_init(void); extern void cpu_proc_init(void);
extern void cpu_proc_fin(void); extern void cpu_proc_fin(void);
extern int cpu_do_idle(void); extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_cache_clean_invalidate_all(void);
extern void cpu_cache_clean_invalidate_range(unsigned long address, unsigned long end, int flags);
extern void cpu_dcache_invalidate_range(unsigned long start, unsigned long end);
extern void cpu_dcache_clean_range(unsigned long start, unsigned long end);
extern void cpu_dcache_clean_page(void *virt_page);
extern void cpu_dcache_clean_entry(unsigned long address);
extern void cpu_icache_invalidate_range(unsigned long start, unsigned long end);
extern void cpu_icache_invalidate_page(void *virt_page);
extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_set_pgd(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte(pte_t *ptep, pte_t pte); extern void cpu_set_pte(pte_t *ptep, pte_t pte);
......
...@@ -10,6 +10,62 @@ ...@@ -10,6 +10,62 @@
#include <asm/mman.h> #include <asm/mman.h>
#include <asm/glue.h> #include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintainence model
#endif
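The chain of #if blocks above implements a first-come rule: the first configured CPU class claims _CACHE for itself; any later class that also needs a cache model finds _CACHE already defined and turns on MULTI_CACHE instead. Below is a standalone sketch of the same idiom with DEMO_-prefixed names; the two CONFIG_ macros are forced on by hand purely for the demo, and dropping one of them flips the output to the direct-call case.

        #include <stdio.h>

        #define CONFIG_CPU_SA1100 1
        #define CONFIG_CPU_XSCALE 1     /* a second cache type forces MULTI_CACHE */

        #undef DEMO_CACHE
        #undef DEMO_MULTI_CACHE

        #if defined(CONFIG_CPU_SA1100)
        # ifdef DEMO_CACHE
        #  define DEMO_MULTI_CACHE 1
        # else
        #  define DEMO_CACHE "v4wb"
        # endif
        #endif

        #if defined(CONFIG_CPU_XSCALE)
        # ifdef DEMO_CACHE
        #  define DEMO_MULTI_CACHE 1
        # else
        #  define DEMO_CACHE "xscale"
        # endif
        #endif

        int main(void)
        {
        #ifdef DEMO_MULTI_CACHE
                printf("multi-cache build: dispatch via cpu_cache function pointers\n");
        #else
                printf("single-cache build: calls bound directly to %s_* entry points\n",
                       DEMO_CACHE);
        #endif
                return 0;
        }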
/* /*
* This flag is used to indicate that the page pointed to by a pte * This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user. * is dirty and requires cleaning before returning it to the user.
...@@ -17,16 +73,94 @@ ...@@ -17,16 +73,94 @@
#define PG_dcache_dirty PG_arch_1 #define PG_dcache_dirty PG_arch_1
/* /*
* Cache handling for 32-bit ARM processors. * MM Cache Management
* ===================
*
* The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
* implement these methods.
*
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
* See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
* flush_cache_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_cache_user_mm(mm)
*
* Clean and invalidate all user space cache entries
* before a change of page tables.
*
* flush_cache_user_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address space before a change of page tables.
* - start - user start address (inclusive, page aligned)
* - end - user end address (exclusive, page aligned)
* - flags - vma->vm_flags field
*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* DMA Cache Coherency
* ===================
*
* dma_inv_range(start, end)
* *
* Note that on ARM, we have a more accurate specification than that * Invalidate (discard) the specified virtual address range.
* Linux's "flush". We therefore do not use "flush" here, but instead * May not write back any entries. If 'start' or 'end'
* use: * are not cache line aligned, those lines must be written
* back.
* - start - virtual start address
* - end - virtual end address
* *
* clean: the act of pushing dirty cache entries out to memory. * dma_clean_range(start, end)
* invalidate: the act of discarding data held within the cache, *
* whether it is dirty or not. * Clean (write back) the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*/
struct cpu_cache_fns {
void (*flush_kern_all)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_page)(void *);
void (*dma_inv_range)(unsigned long, unsigned long);
void (*dma_clean_range)(unsigned long, unsigned long);
void (*dma_flush_range)(unsigned long, unsigned long);
};
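For comparison with the assembly .long table earlier in this commit (xscale_cache_fns), this is roughly what one CPU's method vector amounts to when written as C. The struct is re-declared locally so the sketch compiles on its own, and the my_cpu_* bodies are placeholders, not real implementations.

        #include <stdio.h>

        struct demo_cache_fns {
                void (*flush_kern_all)(void);
                void (*flush_user_all)(void);
                void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
                void (*coherent_kern_range)(unsigned long, unsigned long);
                void (*flush_kern_dcache_page)(void *);
                void (*dma_inv_range)(unsigned long, unsigned long);
                void (*dma_clean_range)(unsigned long, unsigned long);
                void (*dma_flush_range)(unsigned long, unsigned long);
        };

        static void my_cpu_flush_kern_all(void) { puts("flush whole cache"); }

        static void my_cpu_dma_clean_range(unsigned long s, unsigned long e)
        {
                printf("clean %#lx..%#lx\n", s, e);
        }

        /* remaining methods omitted for brevity; unused slots stay NULL */
        static const struct demo_cache_fns my_cpu_cache_fns = {
                .flush_kern_all  = my_cpu_flush_kern_all,
                .dma_clean_range = my_cpu_dma_clean_range,
        };

        int main(void)
        {
                my_cpu_cache_fns.flush_kern_all();
                my_cpu_cache_fns.dma_clean_range(0xc0000000UL, 0xc0001000UL);
                return 0;
        }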
/*
* Select the calling method
*/ */
#ifdef MULTI_CACHE
extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/* /*
* These are private to the dma-mapping API. Do not use directly. * These are private to the dma-mapping API. Do not use directly.
...@@ -34,27 +168,57 @@ ...@@ -34,27 +168,57 @@
* is visible to DMA, or data written by DMA to system memory is * is visible to DMA, or data written by DMA to system memory is
* visible to the CPU. * visible to the CPU.
*/ */
#define dmac_inv_range cpu_dcache_invalidate_range #define dmac_inv_range cpu_cache.dma_inv_range
#define dmac_clean_range cpu_dcache_clean_range #define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0) #define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);
#endif
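In the single-cache branch above, __glue() (from asm/glue.h) pastes the _CACHE prefix onto each method name, so the __cpuc_* symbols resolve at compile time with no pointer indirection. A minimal standalone sketch of that binding follows, assuming __glue is the usual two-level token-pasting macro; the demo_ names stand in for a real prefix such as xscale.

        #include <stdio.h>

        #define ____glue(name, fn)  name##fn
        #define __glue(name, fn)    ____glue(name, fn)

        #define _CACHE demo             /* stands in for e.g. xscale */

        #define __cpuc_flush_kern_all  __glue(_CACHE, _flush_kern_cache_all)

        static void demo_flush_kern_cache_all(void)
        {
                puts("whole-cache flush, bound at compile time");
        }

        int main(void)
        {
                __cpuc_flush_kern_all();  /* expands to demo_flush_kern_cache_all() */
                return 0;
        }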
/* /*
* Convert calls to our calling convention. * Convert calls to our calling convention.
*/ */
#define flush_cache_all() cpu_cache_clean_invalidate_all() #define flush_cache_all() __cpuc_flush_kern_all()
static inline void flush_cache_mm(struct mm_struct *mm) static inline void flush_cache_mm(struct mm_struct *mm)
{ {
if (current->active_mm == mm) if (current->active_mm == mm)
cpu_cache_clean_invalidate_all(); __cpuc_flush_user_all();
} }
static inline void static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{ {
if (current->active_mm == vma->vm_mm) if (current->active_mm == vma->vm_mm)
cpu_cache_clean_invalidate_range(start & PAGE_MASK, __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
PAGE_ALIGN(end), vma->vm_flags); vma->vm_flags);
} }
static inline void static inline void
...@@ -62,8 +226,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) ...@@ -62,8 +226,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{ {
if (current->active_mm == vma->vm_mm) { if (current->active_mm == vma->vm_mm) {
unsigned long addr = user_addr & PAGE_MASK; unsigned long addr = user_addr & PAGE_MASK;
cpu_cache_clean_invalidate_range(addr, addr + PAGE_SIZE, __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
vma->vm_flags & VM_EXEC);
} }
} }
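The wrappers above share one pattern for a virtually indexed cache: the flush only matters when the mm being operated on is the one currently resident in the cache, and user ranges are widened to whole pages before being handed to __cpuc_flush_user_range. The following standalone sketch shows only that pattern, with mocked-up types and an assumed 4K page size.

        #include <stdio.h>

        struct demo_mm { int id; };

        static struct demo_mm kernel_mm = { 0 };
        static struct demo_mm *active_mm = &kernel_mm;

        static void demo_flush_user_range(unsigned long s, unsigned long e,
                                          unsigned int vm_flags)
        {
                printf("flush %#lx..%#lx (flags %#x)\n", s, e, vm_flags);
        }

        static void demo_flush_cache_range(struct demo_mm *mm,
                                           unsigned long start, unsigned long end,
                                           unsigned int vm_flags)
        {
                if (active_mm == mm)    /* only the live mm can have cache entries */
                        demo_flush_user_range(start & ~0xfffUL,
                                              (end + 0xfffUL) & ~0xfffUL, vm_flags);
        }

        int main(void)
        {
                struct demo_mm task_mm = { 1 };

                demo_flush_cache_range(&kernel_mm, 0xc0010004, 0xc0012345, 0);
                demo_flush_cache_range(&task_mm, 0x40000000, 0x40002000, 4); /* skipped */
                return 0;
        }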
...@@ -71,15 +234,13 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr) ...@@ -71,15 +234,13 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
* Perform necessary cache operations to ensure that data previously * Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU. * stored within this range of addresses can be executed by the CPU.
*/ */
#define flush_icache_range(s,e) cpu_icache_invalidate_range(s,e) #define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
/* /*
* Perform necessary cache operations to ensure that the TLB will * Perform necessary cache operations to ensure that the TLB will
* see data written in the specified area. * see data written in the specified area.
*/ */
#define clean_dcache_area(start,size) \ #define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
cpu_cache_clean_invalidate_range((unsigned long)start, \
((unsigned long)start) + size, 0);
/* /*
* flush_dcache_page is used when the kernel has written to the page * flush_dcache_page is used when the kernel has written to the page
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
struct cpu_tlb_fns; struct cpu_tlb_fns;
struct cpu_user_fns; struct cpu_user_fns;
struct cpu_cache_fns;
struct processor; struct processor;
/* /*
...@@ -37,13 +38,14 @@ struct proc_info_list { ...@@ -37,13 +38,14 @@ struct proc_info_list {
struct processor *proc; struct processor *proc;
struct cpu_tlb_fns *tlb; struct cpu_tlb_fns *tlb;
struct cpu_user_fns *user; struct cpu_user_fns *user;
struct cpu_cache_fns *cache;
}; };
extern unsigned int elf_hwcap; extern unsigned int elf_hwcap;
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#define PROC_INFO_SZ 44 #define PROC_INFO_SZ 48
#define HWCAP_SWP 1 #define HWCAP_SWP 1
#define HWCAP_HALF 2 #define HWCAP_HALF 2
......
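The PROC_INFO_SZ bump from 44 to 48 is simply the new member: proc_info_list gains a struct cpu_cache_fns *cache pointer, and a pointer is 4 bytes on 32-bit ARM, so the fixed stride used to walk the table grows by one word. The standalone check below uses uint32_t stand-ins for the 4-byte members so the arithmetic holds on any host; the field list is only representative of that era's layout, not authoritative.

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        struct demo_proc_info_old {
                uint32_t cpu_val, cpu_mask;      /*  8 bytes */
                uint32_t mmu_flags, cpu_flush;   /* 16       */
                uint32_t arch_name, elf_name;    /* 24       */
                uint32_t elf_hwcap;              /* 28       */
                uint32_t info;                   /* 32       */
                uint32_t proc, tlb, user;        /* 44       */
        };

        struct demo_proc_info_new {
                struct demo_proc_info_old base;
                uint32_t cache;                  /* 48: newly added pointer */
        };

        int main(void)
        {
                assert(sizeof(struct demo_proc_info_old) == 44);  /* old PROC_INFO_SZ */
                assert(sizeof(struct demo_proc_info_new) == 48);  /* new PROC_INFO_SZ */
                printf("PROC_INFO_SZ: %zu -> %zu\n",
                       sizeof(struct demo_proc_info_old),
                       sizeof(struct demo_proc_info_new));
                return 0;
        }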