Commit 295864f7 authored by Russell King

[ARM] Update ARM CPU support.

Move ARM CPU configuration to arch/arm/mm.  Separate out the selection
of the abort, cache handling, optimised page copying and TLB handling
from the Makefile, and move it into the configuration system.  This
allows us to select the correct files in arch/arm/mm and pick the
appropriate definitions in include/asm-arm/* based upon a config
symbol rather than a bunch of configuration symbols.

Also add ARM1020E, ARM1022 and ARM1026 CPU support.
parent aa26ae6f
@@ -213,198 +213,7 @@ config FORCE_MAX_ZONEORDER
depends on SA1111
default "9"
comment "Processor Type"
# Figure out whether this system uses 26-bit or 32-bit CPUs.
config CPU_32
bool
default y
# Select CPU types depending on the architecture selected. This selects
# which CPUs we support in the kernel image, and the compiler instruction
# optimiser behaviour.
# ARM610
config CPU_ARM610
bool "Support ARM610 processor"
depends on ARCH_RPC
help
The ARM610 is the successor to the ARM3 processor
and was produced by VLSI Technology Inc.
Say Y if you want support for the ARM610 processor.
Otherwise, say N.
# ARM710
config CPU_ARM710
bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
default y if ARCH_CLPS7500
help
A 32-bit RISC microprocessor based on the ARM7 processor core
designed by Advanced RISC Machines Ltd. The ARM710 is the
successor to the ARM610 processor. It was released in
July 1994 by VLSI Technology Inc.
Say Y if you want support for the ARM710 processor.
Otherwise, say N.
# ARM720T
config CPU_ARM720T
bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR
default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712
help
A 32-bit RISC processor with 8kByte Cache, Write Buffer and
MMU built around an ARM7TDMI core.
Say Y if you want support for the ARM720T processor.
Otherwise, say N.
# ARM920T
config CPU_ARM920T
bool "Support ARM920T processor"
depends on ARCH_INTEGRATOR
help
The ARM920T is licensed to be produced by numerous vendors,
and is used in the Maverick EP9312. More information at
<http://linuxdevices.com/products/PD2382866068.html>.
Say Y if you want support for the ARM920T processor.
Otherwise, say N.
# ARM922T
config CPU_ARM922T
bool
depends on ARCH_CAMELOT
default y
help
The ARM922T is a version of the ARM920T, but with smaller
instruction and data caches. It is used in Altera's
Excalibur XA device family.
Say Y if you want support for the ARM922T processor.
Otherwise, say N.
# ARM926T
config CPU_ARM926T
bool "Support ARM926T processor"
depends on ARCH_INTEGRATOR
help
This is a variant of the ARM920. It has slightly different
instruction sequences for cache and TLB operations. Curiously,
there is no documentation on it at the ARM corporate website.
Say Y if you want support for the ARM926T processor.
Otherwise, say N.
# ARM1020
config CPU_ARM1020
bool "Support ARM1020 processor"
depends on ARCH_INTEGRATOR
help
The ARM1020 is the cached version of the ARM10 processor,
with the addition of a floating-point unit.
Say Y if you want support for the ARM1020 processor.
Otherwise, say N.
# SA110
config CPU_SA110
bool "Support StrongARM(R) SA-110 processor" if !ARCH_EBSA110 && !FOOTBRIDGE && !ARCH_TBOX && !ARCH_SHARK && !ARCH_NEXUSPCI && !ARCH_ANAKIN && ARCH_RPC
default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI || ARCH_ANAKIN
help
The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and
is available at five speeds ranging from 100 MHz to 233 MHz.
More information is available at
<http://developer.intel.com/design/strong/sa110.htm>.
Say Y if you want support for the SA-110 processor.
Otherwise, say N.
# SA1100
config CPU_SA1100
bool
depends on ARCH_SA1100
default y
# XScale
config CPU_XSCALE
bool
depends on ARCH_IOP3XX || ARCH_ADIFCC || ARCH_PXA
default y
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
bool
depends on ARCH_RPC || ARCH_CLPS7500
default y
config CPU_32v4
bool
depends on ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI || ARCH_CLPS711X || ARCH_INTEGRATOR || ARCH_SA1100 || ARCH_L7200 || ARCH_ANAKIN || ARCH_CAMELOT
default y
config CPU_32v5
bool
depends on ARCH_IOP3XX || ARCH_ADIFCC || ARCH_PXA
default y
comment "Processor Features"
config ARM_THUMB
bool "Support Thumb instructions (EXPERIMENTAL)"
depends on (CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 || CPU_XSCALE) && EXPERIMENTAL
help
Say Y if you want to have kernel support for ARM Thumb instructions,
fault handlers, and system calls.
The Thumb instruction set is a compressed form of the standard ARM
instruction set resulting in smaller binaries at the expense of
slightly less efficient code.
If you don't know what this all is, saying Y is a safe choice.
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
depends on ARCH_SUPPORTS_BIG_ENDIAN
help
Say Y if you plan on running a kernel in big-endian mode.
Note that your board must be properly built and your board
port must properly enable any big-endian related features
of your chipset/board/processor.
config CPU_ICACHE_DISABLE
bool "Disable I-Cache"
depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache"
depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE
help
Say Y here to use the data cache in writethrough mode. Unless you
specifically require this or are unsure, say N.
config CPU_CACHE_ROUND_ROBIN
bool "Round robin I and D cache replacement algorithm"
depends on (CPU_ARM926T || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE)
help
Say Y here to use the predictable round-robin cache replacement
policy. Unless you specifically require this or are unsure, say N.
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
depends on CPU_ARM1020
help
Say Y here to disable branch prediction. If unsure, say N.
source arch/arm/mm/Kconfig
# bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER
config XSCALE_PMU
......
@@ -15,9 +15,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
/*
* Make sure that the compiler and target are compatible.
*/
@@ -58,19 +55,6 @@ int main(void)
BLANK();
DEFINE(VM_EXEC, VM_EXEC);
BLANK();
DEFINE(HPTE_TYPE_SMALL, PTE_TYPE_SMALL);
DEFINE(HPTE_AP_READ, PTE_AP_READ);
DEFINE(HPTE_AP_WRITE, PTE_AP_WRITE);
BLANK();
DEFINE(LPTE_PRESENT, L_PTE_PRESENT);
DEFINE(LPTE_YOUNG, L_PTE_YOUNG);
DEFINE(LPTE_BUFFERABLE, L_PTE_BUFFERABLE);
DEFINE(LPTE_CACHEABLE, L_PTE_CACHEABLE);
DEFINE(LPTE_USER, L_PTE_USER);
DEFINE(LPTE_WRITE, L_PTE_WRITE);
DEFINE(LPTE_EXEC, L_PTE_EXEC);
DEFINE(LPTE_DIRTY, L_PTE_DIRTY);
BLANK();
DEFINE(PAGE_SZ, PAGE_SIZE);
BLANK();
DEFINE(SYS_ERROR0, 0x9f0000);
......
comment "Processor Type"
config CPU_32
bool
default y
# Select CPU types depending on the architecture selected. This selects
# which CPUs we support in the kernel image, and the compiler instruction
# optimiser behaviour.
# ARM610
config CPU_ARM610
bool "Support ARM610 processor"
depends on ARCH_RPC
select CPU_32v3
select CPU_CACHE_V3
select CPU_COPY_V3
select CPU_TLB_V3
help
The ARM610 is the successor to the ARM3 processor
and was produced by VLSI Technology Inc.
Say Y if you want support for the ARM610 processor.
Otherwise, say N.
# ARM710
config CPU_ARM710
bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
default y if ARCH_CLPS7500
select CPU_32v3
select CPU_CACHE_V3
select CPU_COPY_V3
select CPU_TLB_V3
help
A 32-bit RISC microprocessor based on the ARM7 processor core
designed by Advanced RISC Machines Ltd. The ARM710 is the
successor to the ARM610 processor. It was released in
July 1994 by VLSI Technology Inc.
Say Y if you want support for the ARM710 processor.
Otherwise, say N.
# ARM720T
config CPU_ARM720T
bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR
default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712
select CPU_32v4
select CPU_ABRT_LV4T
select CPU_CACHE_V4
select CPU_COPY_V4WT
select CPU_TLB_V4WT
help
A 32-bit RISC processor with 8kByte Cache, Write Buffer and
MMU built around an ARM7TDMI core.
Say Y if you want support for the ARM720T processor.
Otherwise, say N.
# ARM920T
config CPU_ARM920T
bool "Support ARM920T processor"
depends on ARCH_INTEGRATOR
select CPU_32v4
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_COPY_V4WB
select CPU_TLB_V4WBI
help
The ARM920T is licensed to be produced by numerous vendors,
and is used in the Maverick EP9312. More information at
<http://linuxdevices.com/products/PD2382866068.html>.
Say Y if you want support for the ARM920T processor.
Otherwise, say N.
# ARM922T
config CPU_ARM922T
bool
depends on ARCH_CAMELOT
default y
select CPU_32v4
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_COPY_V4WB
select CPU_TLB_V4WBI
help
The ARM922T is a version of the ARM920T, but with smaller
instruction and data caches. It is used in Altera's
Excalibur XA device family.
Say Y if you want support for the ARM922T processor.
Otherwise, say N.
# ARM926T
config CPU_ARM926T
bool "Support ARM926T processor"
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV5TJ
select CPU_COPY_V4WB
select CPU_TLB_V4WBI
help
This is a variant of the ARM920. It has slightly different
instruction sequences for cache and TLB operations. Curiously,
there is no documentation on it at the ARM corporate website.
Say Y if you want support for the ARM926T processor.
Otherwise, say N.
# ARM1020 - needs validating
config CPU_ARM1020
bool "Support ARM1020T (rev 0) processor"
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_COPY_V4WB
select CPU_TLB_V4WBI
help
The ARM1020 is the 32K cached version of the ARM10 processor,
with the addition of a floating-point unit.
Say Y if you want support for the ARM1020 processor.
Otherwise, say N.
# ARM1020E - needs validating
config CPU_ARM1020E
bool "Support ARM1020E processor"
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_COPY_V4WB
select CPU_TLB_V4WBI
depends on n
# ARM1022E
config CPU_ARM1022
bool "Support ARM1022E processor"
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
select CPU_COPY_V4WB # can probably do better
select CPU_TLB_V4WBI
help
The ARM1022E is an implementation of the ARMv5TE architecture
based upon the ARM10 integer core with a 16KiB L1 Harvard cache,
embedded trace macrocell, and a floating-point unit.
Say Y if you want support for the ARM1022E processor.
Otherwise, say N.
# ARM1026EJ-S
config CPU_ARM1026
bool "Support ARM1026EJ-S processor"
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV5T # Jazelle is needed, but EV5TJ ignores bit 10
select CPU_COPY_V4WB # can probably do better
select CPU_TLB_V4WBI
help
The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture
based upon the ARM10 integer core.
Say Y if you want support for the ARM1026EJ-S processor.
Otherwise, say N.
# SA110
config CPU_SA110
bool "Support StrongARM(R) SA-110 processor" if !ARCH_EBSA110 && !FOOTBRIDGE && !ARCH_TBOX && !ARCH_SHARK && !ARCH_NEXUSPCI && !ARCH_ANAKIN && ARCH_RPC
default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI || ARCH_ANAKIN
select CPU_32v3 if ARCH_RPC
select CPU_32v4 if !ARCH_RPC
select CPU_ABRT_EV4
select CPU_CACHE_V4WB
select CPU_COPY_V4WB
select CPU_TLB_V4WB
help
The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and
is available at five speeds ranging from 100 MHz to 233 MHz.
More information is available at
<http://developer.intel.com/design/strong/sa110.htm>.
Say Y if you want support for the SA-110 processor.
Otherwise, say N.
# SA1100
config CPU_SA1100
bool
depends on ARCH_SA1100
default y
select CPU_32v4
select CPU_ABRT_EV4
select CPU_CACHE_V4WB
select CPU_TLB_V4WB
select CPU_MINICACHE
# XScale
config CPU_XSCALE
bool
depends on ARCH_IOP3XX || ARCH_ADIFCC || ARCH_PXA
default y
select CPU_32v5
select CPU_ABRT_EV5T
select CPU_TLB_V4WBI
select CPU_MINICACHE
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
bool
config CPU_32v4
bool
config CPU_32v5
bool
# The abort model
config CPU_ABRT_EV4
bool
config CPU_ABRT_EV4T
bool
config CPU_ABRT_LV4T
bool
config CPU_ABRT_EV5T
bool
config CPU_ABRT_EV5TJ
bool
# The cache model
config CPU_CACHE_V3
bool
config CPU_CACHE_V4
bool
config CPU_CACHE_V4WT
bool
config CPU_CACHE_V4WB
bool
# The copy-page model
config CPU_COPY_V3
bool
config CPU_COPY_V4WT
bool
config CPU_COPY_V4WB
bool
# This selects the TLB model
config CPU_TLB_V3
bool
help
ARM Architecture Version 3 TLB.
config CPU_TLB_V4WT
bool
help
ARM Architecture Version 4 TLB with writethrough cache.
config CPU_TLB_V4WB
bool
help
ARM Architecture Version 4 TLB with writeback cache.
config CPU_TLB_V4WBI
bool
help
ARM Architecture Version 4 TLB with writeback cache and invalidate
instruction cache entry.
config CPU_TLB_V6
bool
config CPU_MINICACHE
bool
help
Processor has a minicache.
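Each CPU_CACHE_*, CPU_COPY_* and CPU_TLB_* symbol pulls in one implementation of a small function table, and the per-CPU proc_info records later in this commit point at the matching one (for example v4wbi_tlb_fns). A minimal C sketch of the idea; the struct name and layout here are illustrative, not the exact kernel definitions:

    struct vm_area_struct;          /* opaque here */

    /* hypothetical, simplified table; each tlb-*.o provides one instance */
    struct tlb_ops {
            void (*flush_user_range)(unsigned long start, unsigned long end,
                                     struct vm_area_struct *vma);
            void (*flush_kern_range)(unsigned long start, unsigned long end);
    };

    /* e.g. tlb-v4wbi.o exports a table referenced as v4wbi_tlb_fns below */
    extern struct tlb_ops v4wbi_tlb_fns;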
comment "Processor Features"
config ARM_THUMB
bool "Support Thumb user binaries"
depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE
default y
help
Say Y if you want to have kernel support for ARM Thumb instructions,
fault handlers, and system calls.
The Thumb instruction set is a compressed form of the standard ARM
instruction set resulting in smaller binaries at the expense of
slightly less efficient code.
If you don't know what this all is, saying Y is a safe choice.
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
depends on ARCH_SUPPORTS_BIG_ENDIAN
help
Say Y if you plan on running a kernel in big-endian mode.
Note that your board must be properly built and your board
port must properly enable any big-endian related features
of your chipset/board/processor.
config CPU_ICACHE_DISABLE
bool "Disable I-Cache"
depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache"
depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE
help
Say Y here to use the data cache in writethrough mode. Unless you
specifically require this or are unsure, say N.
config CPU_CACHE_ROUND_ROBIN
bool "Round robin I and D cache replacement algorithm"
depends on (CPU_ARM926T || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE)
help
Say Y here to use the predictable round-robin cache replacement
policy. Unless you specifically require this or are unsure, say N.
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
depends on CPU_ARM1020
help
Say Y here to disable branch prediction. If unsure, say N.
@@ -2,29 +2,48 @@
 # Makefile for the linux arm-specific parts of the memory manager.
 #
 # Object file lists.
-obj-y				:= consistent.o extable.o fault-armv.o \
-				   fault-common.o init.o ioremap.o mm-armv.o
+obj-y				:= consistent.o extable.o fault-armv.o fault-common.o \
+				   init.o ioremap.o mm-armv.o
 
 obj-$(CONFIG_MODULES)		+= proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
 
-# ARMv3
-p-$(CONFIG_CPU_ARM610)		+= proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
-p-$(CONFIG_CPU_ARM710)		+= proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
-
-# ARMv4
-p-$(CONFIG_CPU_ARM720T)		+= proc-arm720.o tlb-v4.o cache-v4.o copypage-v4wt.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T)		+= proc-arm920.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T)		+= proc-arm922.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM1020)		+= proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110)		+= proc-sa110.o tlb-v4wb.o cache-v4wb.o copypage-v4wb.o abort-ev4.o
-p-$(CONFIG_CPU_SA1100)		+= proc-sa1100.o tlb-v4wb.o cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
-
-# ARMv5
-p-$(CONFIG_CPU_ARM926T)		+= proc-arm926.o tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o
-p-$(CONFIG_CPU_XSCALE)		+= proc-xscale.o tlb-v4wbi.o copypage-xscale.o abort-xscale.o minicache.o
-
-obj-y				+= $(sort $(p-y))
+obj-$(CONFIG_CPU_ABRT_EV4)	+= abort-ev4.o
+obj-$(CONFIG_CPU_ABRT_EV4T)	+= abort-ev4t.o
+obj-$(CONFIG_CPU_ABRT_LV4T)	+= abort-lv4t.o
+obj-$(CONFIG_CPU_ABRT_EV5T)	+= abort-ev5t.o
+obj-$(CONFIG_CPU_ABRT_EV5TJ)	+= abort-ev5tj.o
+
+obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o
+obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o
+obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o
+obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o
+
+obj-$(CONFIG_CPU_COPY_V3)	+= copypage-v3.o
+obj-$(CONFIG_CPU_COPY_V4WT)	+= copypage-v4wt.o
+obj-$(CONFIG_CPU_COPY_V4WB)	+= copypage-v4wb.o
+obj-$(CONFIG_CPU_SA1100)	+= copypage-v4mc.o
+obj-$(CONFIG_CPU_XSCALE)	+= copypage-xscale.o
+
+obj-$(CONFIG_CPU_MINICACHE)	+= minicache.o
+
+obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
+obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
+obj-$(CONFIG_CPU_TLB_V4WB)	+= tlb-v4wb.o
+obj-$(CONFIG_CPU_TLB_V4WBI)	+= tlb-v4wbi.o
+
+obj-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o
+obj-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o
+obj-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o
+obj-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o
+obj-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o
+obj-$(CONFIG_CPU_ARM926T)	+= proc-arm926.o
+obj-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o
+obj-$(CONFIG_CPU_ARM1020E)	+= proc-arm1020e.o
+obj-$(CONFIG_CPU_ARM1022)	+= proc-arm1022.o
+obj-$(CONFIG_CPU_ARM1026)	+= proc-arm1026.o
+obj-$(CONFIG_CPU_SA110)		+= proc-sa110.o
+obj-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o
+obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Function: xscale_abort
* Function: v5t_early_abort
*
* Params : r2 = address of aborted instruction
* : r3 = saved SPSR
@@ -16,12 +16,9 @@
* Note: we read user space. This means we might cause a data
* abort here if the I-TLB and D-TLB aren't seeing the same
* picture. Unfortunately, this does happen. We live with it.
*
* Note: XScale contains non-standard architecture extensions.
* It requires its own early abort handler
*/
.align 5
ENTRY(xscale_abort)
ENTRY(v5t_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
tst r3, #PSR_T_BIT
......
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Function: v5tej_early_abort
* Function: v5tj_early_abort
*
* Params : r2 = address of aborted instruction
* : r3 = saved SPSR
@@ -18,19 +18,19 @@
* picture. Unfortunately, this does happen. We live with it.
*/
.align 5
ENTRY(v5tej_early_abort)
ENTRY(v5tj_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #PSR_J_BIT
tst r3, #PSR_J_BIT @ Java?
orrne r1, r1, #1 << 11 @ always assume write
bne 1f
tst r3, #PSR_T_BIT
movne pc, lr
tst r3, #PSR_T_BIT @ Thumb?
ldrneh r3, [r2] @ read aborted thumb instruction
ldreq r3, [r2] @ read aborted ARM instruction
movne r3, r3, lsl #(21 - 12) @ move thumb bit 11 to ARM bit 20
tst r3, #1 << 20 @ L = 1 -> write
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
1: mov pc, lr
mov pc, lr
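The net effect of these early-abort routines is easier to see in C: they hand back the fault address (FAR) and a fixed-up fault status (FSR) in which bit 11 is reused as a "this was a write" flag. A sketch of the v5tj decode, using an illustrative helper name (not a kernel function) and taking the "always assume write" path for Jazelle, as the code above does:

    #include <stdint.h>

    #define PSR_T_BIT   (1u << 5)    /* Thumb state */
    #define PSR_J_BIT   (1u << 24)   /* Jazelle state */

    /* insn is the aborted instruction, already fetched (16 bits if Thumb) */
    static uint32_t decode_abort_fsr(uint32_t fsr, uint32_t spsr, uint32_t insn)
    {
            fsr &= ~((1u << 11) | (1u << 10));  /* clear FSR bits 11 and 10 */
            if (spsr & PSR_J_BIT)               /* Jazelle: can't decode insn */
                    return fsr | (1u << 11);    /* always assume a write */
            if (spsr & PSR_T_BIT)               /* Thumb: bit 11 -> ARM bit 20 */
                    insn <<= (21 - 12);
            if (!(insn & (1u << 20)))           /* L = 0 -> store, i.e. write */
                    fsr |= 1u << 11;
            return fsr;
    }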
/*
* linux/arch/arm/mm/arm1020.S: MMU functions for ARM1020
* linux/arch/arm/mm/proc-arm1020.S: MMU functions for ARM1020
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/hardware.h>
@@ -379,19 +380,19 @@ ENTRY(cpu_arm1020_switch_mm)
ENTRY(cpu_arm1020_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
orreq r2, r2, #HPTE_AP_WRITE
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young?
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -410,7 +411,7 @@ ENTRY(cpu_arm1020_set_pte)
ENTRY(cpu_arm1020_name)
.ascii "Arm1020"
.ascii "ARM1020"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
@@ -445,10 +446,8 @@ __arm1020_setup:
/*
* Clear out 'unwanted' bits (then put them in if we need them)
*/
bic r0, r0, #0x0e00 @ ....??r.........
bic r0, r0, #0x0002 @ ..............a.
bic r0, r0, #0x000c @ W,D
bic r0, r0, #0x1000 @ I
bic r0, r0, #0x1e00 @ i...??r.........
bic r0, r0, #0x000e @ ............wca.
/*
* Turn on what we want
*/
@@ -490,12 +489,12 @@ arm1020_processor_functions:
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4t"
.asciz "armv5t"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.align
@@ -503,8 +502,8 @@ cpu_elf_name:
.type __arm1020_proc_info,#object
__arm1020_proc_info:
.long 0x4100a200
.long 0xff00fff0
.long 0x4104a200 @ ARM 1020T (Architecture v5T)
.long 0xff0ffff0
.long 0x00000c02 @ mmuflags
b __arm1020_setup
.long cpu_arch_name
......
/*
* linux/arch/arm/mm/proc-arm1020e.S: MMU functions for ARM1020E
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020e.
*
* CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/hardware.h>
/*
* This is the maximum size of an area which will be invalidated
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen so that we use the cheapest
* alternative.
*/
#define MAX_AREA_SIZE 32768
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 16
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 32768
.text
/*
* cpu_arm1020e_proc_init()
*/
ENTRY(cpu_arm1020e_proc_init)
mov pc, lr
/*
* cpu_arm1020e_proc_fin()
*/
ENTRY(cpu_arm1020e_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl arm1020e_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_arm1020e_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_arm1020e_do_idle()
*/
.align 5
ENTRY(cpu_arm1020e_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr
/* ================================= CACHE ================================ */
.align 5
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(arm1020e_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(arm1020e_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 15 to 0
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
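The nested loop above is a set/way walk: the operand of the "clean+invalidate D index" operation carries the cache entry (index) in bits 26 and up and the segment in bits 5 and up. The same walk in C, assuming an ARM target for the inline asm and an illustrative function name:

    #include <stdint.h>

    #define CACHE_DSEGMENTS 16
    #define CACHE_DENTRIES  64

    static inline void clean_inv_dcache_index(uint32_t val)
    {
            /* equivalent of: mcr p15, 0, val, c7, c14, 2 */
            asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
    }

    static void flush_whole_dcache(void)   /* hypothetical helper */
    {
            for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
                    for (int entry = CACHE_DENTRIES - 1; entry >= 0; entry--)
                            clean_inv_dcache_index((uint32_t)entry << 26 |
                                                   (uint32_t)seg << 5);
    }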
/*
* flush_user_cache_range(start, end, flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
ENTRY(arm1020e_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_coherent_kern_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - page - page aligned address
*/
ENTRY(arm1020e_flush_kern_dcache_page)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1020e_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
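The two tst/mcrne pairs at the top exist because a DMA invalidate must not throw away a neighbour's data sharing a partially covered cache line; such edge lines are cleaned (written back) first. In C, with hypothetical helpers standing in for the individual CP15 operations:

    #define CACHE_DLINESIZE 32

    extern void clean_dcache_line(unsigned long addr);  /* c7, c10, 1 */
    extern void inv_dcache_line(unsigned long addr);    /* c7, c6, 1  */
    extern void drain_write_buffer(void);               /* c7, c10, 4 */

    static void dma_inv_range_sketch(unsigned long start, unsigned long end)
    {
            if (start & (CACHE_DLINESIZE - 1))  /* partial line at start */
                    clean_dcache_line(start);
            if (end & (CACHE_DLINESIZE - 1))    /* partial line at end */
                    clean_dcache_line(end);
            for (unsigned long a = start & ~(unsigned long)(CACHE_DLINESIZE - 1);
                 a < end; a += CACHE_DLINESIZE)
                    inv_dcache_line(a);
            drain_write_buffer();
    }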
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1020e_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_kern_cache_all
.long arm1020e_flush_user_cache_all
.long arm1020e_flush_user_cache_range
.long arm1020e_coherent_kern_range
.long arm1020e_flush_kern_dcache_page
.long arm1020e_dma_inv_range
.long arm1020e_dma_clean_range
.long arm1020e_dma_flush_range
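These eight .long entries form a per-CPU method table; generic code calls through it rather than calling arm1020e_* directly, which is what lets several CPU types coexist in one kernel image. As a C struct whose field order mirrors the table above (the in-tree struct cpu_cache_fns may differ in detail):

    struct cache_ops {      /* hypothetical name */
            void (*flush_kern_all)(void);
            void (*flush_user_all)(void);
            void (*flush_user_range)(unsigned long start, unsigned long end,
                                     unsigned int vm_flags);
            void (*coherent_kern_range)(unsigned long start, unsigned long end);
            void (*flush_kern_dcache_page)(void *page);
            void (*dma_inv_range)(unsigned long start, unsigned long end);
            void (*dma_clean_range)(unsigned long start, unsigned long end);
            void (*dma_flush_range)(unsigned long start, unsigned long end);
    };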
.align 5
ENTRY(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_arm1020e_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_arm1020e_switch_mm)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
mov r1, #0xF @ 16 segments
1: mov r3, #0x3F @ 64 entries
2: mov ip, r3, LSL #26 @ shift up entry
orr ip, ip, r1, LSL #5 @ shift in/up index
mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry
mov ip, #0
subs r3, r3, #1
cmp r3, #0
bge 2b @ entries 3F to 0
subs r1, r1, #1
cmp r1, #0
bge 1b @ segments 15 to 0
#endif
mov r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
#endif
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_arm1020e_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm1020e_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
eor r3, r1, #0x0a @ C & small page?
tst r3, #0x0b
biceq r2, r2, #4
#endif
str r2, [r0] @ hardware version
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mov pc, lr
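Written out in C, the eor/tst/orr sequence above does the following: the Linux view of the PTE is stored at ptep, the hardware small-page descriptor goes 2048 bytes below it, access-permission bits are derived from USER/WRITE/DIRTY, and anything not both present and young becomes a faulting (zero) entry. The constants are illustrative stand-ins for the asm-arm/pgtable.h definitions:

    #include <stdint.h>

    #define L_PTE_PRESENT         (1u << 0)  /* illustrative values */
    #define L_PTE_YOUNG           (1u << 1)
    #define L_PTE_USER            (1u << 4)
    #define L_PTE_WRITE           (1u << 5)
    #define L_PTE_DIRTY           (1u << 7)

    #define PTE_TYPE_MASK         0x003u
    #define PTE_TYPE_SMALL        0x002u
    #define PTE_SMALL_AP_MASK     0xff0u
    #define PTE_SMALL_AP_UNO_SRW  0x550u  /* user none, svc read/write */
    #define PTE_SMALL_AP_URO_SRW  0xaa0u  /* user read-only, svc read/write */

    static void set_pte_sketch(uint32_t *ptep, uint32_t lpte)
    {
            uint32_t hpte;

            ptep[0] = lpte;                 /* Linux version */
            ptep -= 2048 / sizeof(*ptep);   /* hardware copy sits -2048 */

            hpte = (lpte & ~(PTE_SMALL_AP_MASK | PTE_TYPE_MASK)) | PTE_TYPE_SMALL;
            if (lpte & L_PTE_USER)
                    hpte |= PTE_SMALL_AP_URO_SRW;
            if ((lpte & (L_PTE_WRITE | L_PTE_DIRTY)) == (L_PTE_WRITE | L_PTE_DIRTY))
                    hpte |= PTE_SMALL_AP_UNO_SRW;   /* both APs set: user r/w */
            if ((lpte & (L_PTE_PRESENT | L_PTE_YOUNG)) != (L_PTE_PRESENT | L_PTE_YOUNG))
                    hpte = 0;                       /* not present+young: fault */

            ptep[0] = hpte;                 /* then clean the D entry, as above */
    }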
ENTRY(cpu_arm1020e_name)
.ascii "ARM1020E"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.align
__INIT
__arm1020e_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
mcr p15, 0, r4, c2, c0 @ load page table pointer
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mrc p15, 0, r0, c1, c0 @ get control register v4
/*
* Clear out 'unwanted' bits (then put them in if we need them)
*/
bic r0, r0, #0x1e00 @ i...??r.........
bic r0, r0, #0x000e @ ............wca.
/*
* Turn on what we want
*/
orr r0, r0, #0x0031 @ ..........DP...M
orr r0, r0, #0x0100 @ .......S........
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .R..............
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
orr r0, r0, #0x0800 @ ....Z...........
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
orr r0, r0, #0x0004 @ Enable D cache
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
orr r0, r0, #0x1000 @ I Cache on
#endif
mov pc, lr
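The dot-letter comments above index bits of the CP15 control register (c1). For reference, the bits this setup sequence touches, expressed as C masks following the standard ARM control register layout:

    #define CR_M    (1u << 0)   /* MMU enable ("M")                */
    #define CR_A    (1u << 1)   /* alignment fault checking ("a")  */
    #define CR_C    (1u << 2)   /* D-cache enable ("c"/"C")        */
    #define CR_W    (1u << 3)   /* write buffer enable ("w")       */
    #define CR_P    (1u << 4)   /* 32-bit exceptions ("P")         */
    #define CR_D    (1u << 5)   /* 32-bit address range ("D")      */
    #define CR_S    (1u << 8)   /* system protection ("S")         */
    #define CR_R    (1u << 9)   /* ROM protection ("r")            */
    #define CR_Z    (1u << 11)  /* branch prediction ("Z")         */
    #define CR_I    (1u << 12)  /* I-cache enable ("i"/"I")        */
    #define CR_V    (1u << 13)  /* high vectors ("V")              */
    #define CR_RR   (1u << 14)  /* round-robin replacement ("R")   */

    /* e.g. "orr r0, r0, #0x0031" sets CR_M | CR_P | CR_D, and
     * "bic r0, r0, #0x000e" clears CR_W | CR_C | CR_A.        */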
.text
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.type arm1020e_processor_functions, #object
arm1020e_processor_functions:
.word v4t_early_abort
.word cpu_arm1020e_proc_init
.word cpu_arm1020e_proc_fin
.word cpu_arm1020e_reset
.word cpu_arm1020e_do_idle
.word cpu_arm1020e_dcache_clean_area
.word cpu_arm1020e_switch_mm
.word cpu_arm1020e_set_pte
.size arm1020e_processor_functions, . - arm1020e_processor_functions
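Every supported CPU provides one such jump table, and the kernel calls through whichever table the boot-time CPU match selects. A sketch with field order taken from the .word list above; names and types are illustrative, and the in-tree struct processor differs in detail:

    struct processor_ops {              /* hypothetical name */
            void (*early_abort)(void);  /* data abort decoder */
            void (*proc_init)(void);
            void (*proc_fin)(void);
            void (*reset)(unsigned long loc);
            void (*do_idle)(void);
            void (*dcache_clean_area)(void *addr, int size);
            void (*switch_mm)(unsigned long pgd_phys);
            void (*set_pte)(unsigned long *ptep, unsigned long pte);
    };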
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5te"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
.type __arm1020e_proc_info,#object
__arm1020e_proc_info:
.long 0x4105a200 @ ARM 1020TE (Architecture v5TE)
.long 0xff0ffff0
.long 0x00000c12 @ mmuflags
b __arm1020e_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm1020e_name
.long arm1020e_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm1020e_cache_fns
.size __arm1020e_proc_info, . - __arm1020e_proc_info
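The first two words of each .proc.info record are a CPU ID value and mask; at boot the ID read from CP15 c0 is masked and compared against each record until one matches (here (id & 0xff0ffff0) == 0x4105a200 selects the ARM1020E). Roughly, in C:

    struct proc_info {          /* simplified; the real record has more fields */
            unsigned long cpu_val;   /* e.g. 0x4105a200 */
            unsigned long cpu_mask;  /* e.g. 0xff0ffff0 */
            /* mmuflags, setup branch, name/ops/tlb/user/cache pointers ... */
    };

    static const struct proc_info *match_cpu(const struct proc_info *tbl,
                                             int n, unsigned long cpuid)
    {
            for (int i = 0; i < n; i++)
                    if ((cpuid & tbl[i].cpu_mask) == tbl[i].cpu_val)
                            return &tbl[i];
            return 0;           /* unknown CPU */
    }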
/*
* linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* These are the low level assembler for performing cache and TLB
* functions on the ARM1022E.
*/
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
/*
* This is the maximum size of an area which will be invalidated
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen so that we use the cheapest
* alternative.
*/
#define MAX_AREA_SIZE 32768
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 16
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 32768
.text
/*
* cpu_arm1022_proc_init()
*/
ENTRY(cpu_arm1022_proc_init)
mov pc, lr
/*
* cpu_arm1022_proc_fin()
*/
ENTRY(cpu_arm1022_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl arm1022_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_arm1022_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_arm1022_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_arm1022_do_idle()
*/
.align 5
ENTRY(cpu_arm1022_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr
/* ================================= CACHE ================================ */
.align 5
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(arm1022_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(arm1022_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 15 to 0
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_user_cache_range(start, end, flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
ENTRY(arm1022_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1022_coherent_kern_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - page - page aligned address
*/
ENTRY(arm1022_flush_kern_dcache_page)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1022_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1022_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1022_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(arm1022_cache_fns)
.long arm1022_flush_kern_cache_all
.long arm1022_flush_user_cache_all
.long arm1022_flush_user_cache_range
.long arm1022_coherent_kern_range
.long arm1022_flush_kern_dcache_page
.long arm1022_dma_inv_range
.long arm1022_dma_clean_range
.long arm1022_dma_flush_range
.align 5
ENTRY(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_arm1022_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_arm1022_switch_mm)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 15 to 0
#endif
mov r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
#endif
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_arm1022_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm1022_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
eor r3, r1, #0x0a @ C & small page?
tst r3, #0x0b
biceq r2, r2, #4
#endif
str r2, [r0] @ hardware version
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mov pc, lr
ENTRY(cpu_arm1022_name)
.ascii "arm1022"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.align
__INIT
__arm1022_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
mcr p15, 0, r4, c2, c0 @ load page table pointer
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mrc p15, 0, r0, c1, c0 @ get control register v4
/*
* Clear out 'unwanted' bits (then put them in if we need them)
*/
bic r0, r0, #0x1e00 @ ...i??r.........
bic r0, r0, #0x000e @ ............wca.
/*
* Turn on what we want
*/
orr r0, r0, #0x0031 @ ..........DP...M
orr r0, r0, #0x2100 @ ..V....S........
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .R..............
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
orr r0, r0, #0x0800 @ ....Z...........
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
orr r0, r0, #0x0004 @ .............C..
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
orr r0, r0, #0x1000 @ ...I............
#endif
mov pc, lr
.text
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.type arm1022_processor_functions, #object
arm1022_processor_functions:
.word v4t_early_abort
.word cpu_arm1022_proc_init
.word cpu_arm1022_proc_fin
.word cpu_arm1022_reset
.word cpu_arm1022_do_idle
.word cpu_arm1022_dcache_clean_area
.word cpu_arm1022_switch_mm
.word cpu_arm1022_set_pte
.size arm1022_processor_functions, . - arm1022_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5te"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
.type __arm1022_proc_info,#object
__arm1022_proc_info:
.long 0x4105a220 @ ARM 1022E (v5TE)
.long 0xff0ffff0
.long 0x00000c12 @ mmuflags
b __arm1022_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm1022_name
.long arm1022_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm1022_cache_fns
.size __arm1022_proc_info, . - __arm1022_proc_info
/*
* linux/arch/arm/mm/proc-arm1026.S: MMU functions for ARM1026EJ-S
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*
* These are the low level assembler for performing cache and TLB
* functions on the ARM1026EJ-S.
*/
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
/*
* This is the maximum size of an area which will be invalidated
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen so that we use the cheapest
* alternative.
*/
#define MAX_AREA_SIZE 32768
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 16
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 32768
.text
/*
* cpu_arm1026_proc_init()
*/
ENTRY(cpu_arm1026_proc_init)
mov pc, lr
/*
* cpu_arm1026_proc_fin()
*/
ENTRY(cpu_arm1026_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl arm1026_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_arm1026_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_arm1026_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_arm1026_do_idle()
*/
.align 5
ENTRY(cpu_arm1026_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr
/* ================================= CACHE ================================ */
.align 5
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(arm1026_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(arm1026_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate
bne 1b
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_user_cache_range(start, end, flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
ENTRY(arm1026_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1026_coherent_kern_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - page - page aligned address
*/
ENTRY(arm1026_flush_kern_dcache_page)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1026_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1026_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1026_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(arm1026_cache_fns)
.long arm1026_flush_kern_cache_all
.long arm1026_flush_user_cache_all
.long arm1026_flush_user_cache_range
.long arm1026_coherent_kern_range
.long arm1026_flush_kern_dcache_page
.long arm1026_dma_inv_range
.long arm1026_dma_clean_range
.long arm1026_dma_flush_range
.align 5
ENTRY(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_arm1026_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_arm1026_switch_mm)
mov r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate
bne 1b
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
#endif
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_arm1026_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm1026_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
eor r3, r1, #0x0a @ C & small page?
tst r3, #0x0b
biceq r2, r2, #4
#endif
str r2, [r0] @ hardware version
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mov pc, lr
__INIT
__arm1026_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
mcr p15, 0, r4, c2, c0 @ load page table pointer
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r0, #4 @ explicitly disable writeback
mcr p15, 7, r0, c15, c0, 0
#endif
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mrc p15, 0, r0, c1, c0 @ get control register v4
/*
* Clear out 'unwanted' bits (then put them in if we need them)
*/
bic r0, r0, #0x1e00 @ ...i??r.........
bic r0, r0, #0x000e @ ............wca.
/*
* Turn on what we want
*/
orr r0, r0, #0x0031 @ ..........DP...M
orr r0, r0, #0x2100 @ ..V....S........
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .R..............
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
orr r0, r0, #0x0800 @ ....Z...........
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
orr r0, r0, #0x0004 @ .............C..
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
orr r0, r0, #0x1000 @ ...I............
#endif
mov pc, lr
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.type arm1026_processor_functions, #object
arm1026_processor_functions:
.word ev5t_early_abort
.word cpu_arm1026_proc_init
.word cpu_arm1026_proc_fin
.word cpu_arm1026_reset
.word cpu_arm1026_do_idle
.word cpu_arm1026_dcache_clean_area
.word cpu_arm1026_switch_mm
.word cpu_arm1026_set_pte
.size arm1026_processor_functions, . - arm1026_processor_functions
.section .rodata
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5tej"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.align
.type cpu_arm1026_name, #object
ENTRY(cpu_arm1026_name)
.ascii "ARM1026EJ-S"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
.ascii "B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
.ascii "\0"
.align
.size cpu_arm1026_name, . - cpu_arm1026_name
.section ".proc.info", #alloc, #execinstr
.type __arm1026_proc_info,#object
__arm1026_proc_info:
.long 0x4106a260 @ ARM 1026EJ-S (v5TEJ)
.long 0xff0ffff0
.long 0x00000c12 @ mmuflags
b __arm1026_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT
.long cpu_arm1026_name
.long arm1026_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm1026_cache_fns
.size __arm1026_proc_info, . - __arm1026_proc_info
......@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
@@ -214,19 +215,19 @@ ENTRY(cpu_arm6_set_pte)
ENTRY(cpu_arm7_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
orrne r2, r2, #HPTE_AP_READ
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
orreq r2, r2, #HPTE_AP_WRITE
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
movne r2, #0
str r2, [r0] @ hardware version
......
......@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/hardware.h>
......@@ -90,19 +91,19 @@ ENTRY(cpu_arm720_switch_mm)
ENTRY(cpu_arm720_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
orreq r2, r2, #HPTE_AP_WRITE
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
movne r2, #0
str r2, [r0] @ hardware version
......
/*
* linux/arch/arm/mm/arm920.S: MMU functions for ARM920
* linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920
*
* Copyright (C) 1999,2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
......@@ -28,6 +28,7 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/page.h>
......@@ -333,19 +334,19 @@ ENTRY(cpu_arm920_switch_mm)
ENTRY(cpu_arm920_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #LPTE_USER @ User or Exec?
orrne r2, r2, #HPTE_AP_READ
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
orreq r2, r2, #HPTE_AP_WRITE
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young?
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
......@@ -361,7 +362,7 @@ ENTRY(cpu_arm920_set_pte)
ENTRY(cpu_arm920_name)
.ascii "Arm920T"
.ascii "ARM920T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
......
/*
* linux/arch/arm/mm/arm922.S: MMU functions for ARM922
* linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922
*
* Copyright (C) 1999,2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
......@@ -29,6 +29,7 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/page.h>
......@@ -337,19 +338,19 @@ ENTRY(cpu_arm922_switch_mm)
ENTRY(cpu_arm922_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
orreq r2, r2, #HPTE_AP_WRITE
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young?
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
......@@ -365,7 +366,7 @@ ENTRY(cpu_arm922_set_pte)
ENTRY(cpu_arm922_name)
.ascii "Arm922T"
.ascii "ARM922T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
......
......@@ -28,6 +28,7 @@
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/page.h>
......@@ -337,19 +338,19 @@ ENTRY(cpu_arm926_switch_mm)
ENTRY(cpu_arm926_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
orreq r2, r2, #HPTE_AP_WRITE
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young?
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
......@@ -436,7 +437,7 @@ __arm926_setup:
*/
.type arm926_processor_functions, #object
arm926_processor_functions:
.word v5tej_early_abort
.word v5tj_early_abort
.word cpu_arm926_proc_init
.word cpu_arm926_proc_fin
.word cpu_arm926_reset
......@@ -461,8 +462,8 @@ cpu_elf_name:
.type __arm926_proc_info,#object
__arm926_proc_info:
.long 0x41009260
.long 0xff00fff0
.long 0x41069260 @ ARM926EJ-S (v5TEJ)
.long 0xff0ffff0
.long 0x00000c1e @ mmuflags
b __arm926_setup
.long cpu_arch_name
......
......@@ -163,11 +163,11 @@ ENTRY(cpu_sa110_set_pte)
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User or Exec?
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
......
/*
* linux/arch/arm/mm/proc-sa110.S
* linux/arch/arm/mm/proc-sa1100.S
*
* Copyright (C) 1997-2002 Russell King
*
......@@ -187,11 +187,11 @@ ENTRY(cpu_sa1100_set_pte)
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User or Exec?
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
......
......@@ -236,6 +236,9 @@ ENTRY(xscale_flush_user_cache_range)
*
* - start - virtual start address
* - end - virtual end address
*
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
ENTRY(xscale_coherent_kern_range)
bic r0, r0, #CACHELINESIZE - 1
......@@ -612,7 +615,7 @@ __xscale_setup:
.type xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
.word xscale_abort
.word v5t_early_abort
.word cpu_xscale_proc_init
.word cpu_xscale_proc_fin
.word cpu_xscale_reset
......
......@@ -34,7 +34,6 @@ ENTRY(v4_flush_user_tlb_range)
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
movne pc, lr @ no, we don't do anything
vma_vm_flags ip, r2
.v4_flush_kern_tlb_range:
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
......
......@@ -58,7 +58,7 @@
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
......@@ -66,7 +66,7 @@
# endif
#endif
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
......@@ -74,8 +74,7 @@
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM1020)
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
......@@ -83,19 +82,19 @@
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
# define CPU_ABORT_HANDLER v5tej_early_abort
# define CPU_ABORT_HANDLER v5tj_early_abort
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
# define CPU_ABORT_HANDLER xscale_abort
# define CPU_ABORT_HANDLER v5t_early_abort
# endif
#endif
......
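Every abort-model block in this header now follows the same claim-or-go-multi idiom: the first selected CONFIG_CPU_ABRT_* symbol defines CPU_ABORT_HANDLER, and any second one flips the kernel into MULTI_ABORT dispatch. The same shape recurs below for MULTI_USER in page.h and for the TLB flags in tlbflush.h. A condensed sketch, with the consumer side paraphrased rather than quoted:

/* One block per abort model; repeated for LV4T, EV4, EV4T, EV5TJ, EV5T. */
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_ABORT_HANDLER
#  define MULTI_ABORT 1                         /* a second model is built in */
# else
#  define CPU_ABORT_HANDLER v5tj_early_abort    /* sole model: direct branch */
# endif
#endif

/* Consumer, paraphrased: with MULTI_ABORT defined, the data-abort vector
 * loads the handler from the runtime processor description instead of
 * branching straight to CPU_ABORT_HANDLER. */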
......@@ -44,7 +44,7 @@
#undef _USER
#undef MULTI_USER
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
#ifdef CONFIG_CPU_COPY_V3
# ifdef _USER
# define MULTI_USER 1
# else
......@@ -52,7 +52,7 @@
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
#ifdef CONFIG_CPU_COPY_V4WT
# ifdef _USER
# define MULTI_USER 1
# else
......@@ -60,9 +60,7 @@
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_SA110) || \
defined(CONFIG_CPU_ARM1020)
#ifdef CONFIG_CPU_COPY_V4WB
# ifdef _USER
# define MULTI_USER 1
# else
......@@ -70,7 +68,7 @@
# endif
#endif
#if defined(CONFIG_CPU_SA1100)
#ifdef CONFIG_CPU_SA1100
# ifdef _USER
# define MULTI_USER 1
# else
......@@ -78,7 +76,7 @@
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
#ifdef CONFIG_CPU_XSCALE
# ifdef _USER
# define MULTI_USER 1
# else
......
......@@ -106,6 +106,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
/*
* - extended small page/tiny page
*/
#define PTE_EXT_AP_MASK (3 << 4)
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (1 << 4)
#define PTE_EXT_AP_URO_SRW (2 << 4)
......@@ -115,12 +116,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
/*
* - small page
*/
#define PTE_SMALL_AP_MASK (0xff << 4)
#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
#define PTE_SMALL_AP_URW_SRW (0xff << 4)
#define PTE_AP_READ PTE_SMALL_AP_URO_SRW
#define PTE_AP_WRITE PTE_SMALL_AP_UNO_SRW
/*
* "Linux" PTE definitions.
......
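The 0x55/0xaa/0xff constants above come from the small-page descriptor carrying four independent 2-bit AP fields, one per 1kB sub-page, in bits 4-11; replicating one 2-bit code into all four fields yields exactly those patterns. A quick illustrative check in C:

/* 2-bit AP codes, as in the extended-page definitions above */
#define AP_UNO_SRO 0    /* no user access, SVC read-only */
#define AP_UNO_SRW 1    /* no user access, SVC read/write */
#define AP_URO_SRW 2    /* user read-only, SVC read/write */
#define AP_URW_SRW 3    /* user read/write, SVC read/write */

/* replicate one 2-bit code into all four sub-page fields (bits 4-11) */
#define SMALL_AP(ap) ((((ap) << 6) | ((ap) << 4) | ((ap) << 2) | (ap)) << 4)

/* SMALL_AP(AP_UNO_SRW) == 0x55 << 4
 * SMALL_AP(AP_URO_SRW) == 0xaa << 4
 * SMALL_AP(AP_URW_SRW) == 0xff << 4 */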
......@@ -26,7 +26,6 @@
*/
#ifdef CONFIG_CPU_32
# define CPU_INCLUDE_NAME "asm/cpu-multi32.h"
# ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
......@@ -99,6 +98,30 @@
# define CPU_NAME cpu_arm1020
# endif
# endif
# ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
# endif
# ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
# endif
# ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
# endif
# ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
......@@ -110,11 +133,10 @@
#endif
#ifndef MULTI_CPU
#undef CPU_INCLUDE_NAME
#define CPU_INCLUDE_NAME "asm/cpu-single.h"
#include "asm/cpu-single.h"
#else
#include "asm/cpu-multi32.h"
#endif
#include CPU_INCLUDE_NAME
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
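With CPU_INCLUDE_NAME gone, the header now includes one of two implementations directly. Roughly how the two sides resolve a call such as cpu_set_pte, paraphrased from cpu-single.h and cpu-multi32.h (neither of which is part of this diff):

/* asm/cpu-single.h, roughly: resolve at compile time by token pasting */
#define __catify_fn(name, x)    name##x
#define __cpu_fn(name, x)       __catify_fn(name, x)  /* expand CPU_NAME first */
#define cpu_set_pte             __cpu_fn(CPU_NAME, _set_pte)
/* CPU_NAME == cpu_arm1026  ->  cpu_set_pte == cpu_arm1026_set_pte */

/* asm/cpu-multi32.h, roughly: dispatch through the processor structure
 * selected from .proc.info at boot */
#define cpu_set_pte(ptep, pte)  processor.set_pte(ptep, pte)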
......@@ -52,7 +52,7 @@
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags v3_tlb_flags
# define v3_always_flags v3_tlb_flags
# ifdef _TLB
......@@ -67,7 +67,7 @@
#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
#if defined(CONFIG_CPU_ARM720T)
#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags v4_tlb_flags
# define v4_always_flags v4_tlb_flags
# ifdef _TLB
......@@ -84,9 +84,7 @@
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_XSCALE)
#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags v4wbi_tlb_flags
# define v4wbi_always_flags v4wbi_tlb_flags
# ifdef _TLB
......@@ -103,7 +101,7 @@
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags v4wb_tlb_flags
# define v4wb_always_flags v4wb_tlb_flags
# ifdef _TLB
......@@ -121,7 +119,7 @@
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
TLB_V6_I_ASID | TLB_V6_D_ASID)
#if defined(CONFIG_CPU_V6)
#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags v6wbi_tlb_flags
# define v6wbi_always_flags v6wbi_tlb_flags
# ifdef _TLB
......
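The possible/always pairs exist so the flush code can fold its feature tests at compile time: the always flags hold what every built-in TLB model guarantees, the possible flags what any of them might need. Paraphrasing the consumer in tlbflush.h (outside this hunk); unselected models define both as 0, so a single-model build reduces each test to a constant:

/* per-build aggregates, paraphrased */
#define possible_tlb_flags      (v3_possible_flags | v4_possible_flags | \
                                 v4wb_possible_flags | v4wbi_possible_flags | \
                                 v6wbi_possible_flags)
#define always_tlb_flags        (v3_always_flags & v4_always_flags & \
                                 v4wb_always_flags & v4wbi_always_flags & \
                                 v6wbi_always_flags)

/* with one TLB model selected, possible == always and this folds away */
#define tlb_flag(f)     ((always_tlb_flags & (f)) || \
                         (__tlb_flag & possible_tlb_flags & (f)))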