Commit 347c8b70 authored by Catalin Marinas

Thumb-2: Implement the unified arch/arm/mm support

This patch adds unified ARM/Thumb-2 support to the arch/arm/mm/* files.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent b86040a5
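
The hunks below rely on the ARM() and THUMB() markers from the unified-assembly header introduced earlier in this series (arch/arm/include/asm/unified.h): each marker emits its argument for one instruction set and discards it for the other. A minimal sketch of the convention, not the header's exact text:

/*
 * Illustrative only: the real definitions live in
 * arch/arm/include/asm/unified.h and differ slightly between
 * assembly and C inline-asm contexts.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define ARM(instr...)			/* dropped when building a Thumb-2 kernel */
#define THUMB(instr...)	instr		/* emitted only for Thumb-2 */
#else
#define ARM(instr...)	instr		/* emitted only for classic ARM */
#define THUMB(instr...)			/* dropped when building an ARM kernel */
#endif

With this in place, a single source file assembles correctly for either instruction set.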
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -159,7 +159,9 @@ union offset_union {
 #define __get8_unaligned_check(ins,val,addr,err)	\
 	__asm__(					\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -215,7 +217,9 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_16				\
-	"1:	"ins"	%1, [%2], #1\n"				\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)			\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)			\
+ THUMB(	"	add	%2, %2, #1\n"	)			\
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
 	"2:	"ins"	%1, [%2]\n"				\
 	"3:\n"							\
@@ -245,11 +249,17 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_32				\
-	"1:	"ins"	%1, [%2], #1\n"				\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)			\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)			\
+ THUMB(	"	add	%2, %2, #1\n"	)			\
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
-	"2:	"ins"	%1, [%2], #1\n"				\
+ ARM(	"2:	"ins"	%1, [%2], #1\n"	)			\
+ THUMB(	"2:	"ins"	%1, [%2]\n"	)			\
+ THUMB(	"	add	%2, %2, #1\n"	)			\
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
-	"3:	"ins"	%1, [%2], #1\n"				\
+ ARM(	"3:	"ins"	%1, [%2], #1\n"	)			\
+ THUMB(	"3:	"ins"	%1, [%2]\n"	)			\
+ THUMB(	"	add	%2, %2, #1\n"	)			\
 	"	mov	%1, %1, "NEXT_BYTE"\n"			\
 	"4:	"ins"	%1, [%2]\n"				\
 	"5:\n"							\
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -21,7 +21,7 @@
  *
  *	Flush the whole D-cache.
  *
- *	Corrupted registers: r0-r5, r7, r9-r11
+ *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
  *
  *	- mm	- mm_struct describing address space
  */
@@ -51,8 +51,12 @@ loop1:
 loop2:
 	mov	r9, r4				@ create working copy of max way size
 loop3:
-	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
-	orr	r11, r11, r7, lsl r2		@ factor index number into r11
+ ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r9, r5		)
+ THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r7, r2		)
+ THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
 	subs	r9, r9, #1			@ decrement the way
 	bge	loop3
@@ -82,11 +86,13 @@ ENDPROC(v7_flush_dcache_all)
  *
  */
 ENTRY(v7_flush_kern_cache_all)
-	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
 ENDPROC(v7_flush_kern_cache_all)
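
The extra THUMB() instructions in loop3 exist because Thumb-2 data-processing instructions cannot take a register-controlled shift as their second operand: ARM's single orr r11, r10, r9, lsl r5 has to become an lsl into a scratch register followed by a plain orr. The scratch register chosen is r6, which is why it joins the stmfd/ldmfd lists and the "Corrupted registers" comment above. A compilable sketch of the same two-step pattern (illustrative, not the kernel routine; the helper name is invented):

/* result = base | (way << shift), written as Thumb-2 requires: shift
 * into a temporary first, then OR. In ARM state this could be the
 * single instruction "orr %0, %1, %2, lsl %3"; Thumb-2 has no
 * encoding with the shift amount held in a register.
 */
static inline unsigned long factor_way(unsigned long base,
				       unsigned long way,
				       unsigned long shift)
{
	unsigned long tmp;

	__asm__("lsl	%0, %2, %3\n\t"		/* tmp = way << shift */
		"orr	%0, %1, %0"		/* tmp = base | tmp */
		: "=&r" (tmp)
		: "r" (base), "r" (way), "r" (shift));
	return tmp;
}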
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -127,7 +127,9 @@ ENDPROC(cpu_v7_switch_mm)
  */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
-	str	r1, [r0], #-2048		@ linux version
+ ARM(	str	r1, [r0], #-2048	)	@ linux version
+ THUMB(	str	r1, [r0]	)		@ linux version
+ THUMB(	sub	r0, r0, #2048	)
 	bic	r3, r1, #0x000003f0
 	bic	r3, r3, #PTE_TYPE_MASK
@@ -273,6 +275,7 @@ __v7_setup:
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
+ THUMB(	orr	r0, r0, #1 << 30	)	@ Thumb exceptions
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
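
Two smaller Thumb-2 limitations show up in proc-v7.S. In cpu_v7_set_pte_ext, str r1, [r0], #-2048 uses post-indexed writeback with a large negative offset; Thumb-2 store writeback only encodes an 8-bit offset (-255..+255), so the store and the pointer adjustment are split. In __v7_setup, the new THUMB() line sets bit 30 of the CP15 control register, the SCTLR.TE (Thumb Exception enable) bit, so a Thumb-2 kernel takes exceptions in Thumb state. A hedged C sketch of that control-register update (privileged code only; the helper name is invented):

#define SCTLR_TE	(1UL << 30)	/* SCTLR.TE: take exceptions in Thumb state */

static inline void enable_thumb_exceptions(void)
{
	unsigned long sctlr;

	__asm__("mrc	p15, 0, %0, c1, c0, 0" : "=r" (sctlr));		/* read SCTLR */
	sctlr |= SCTLR_TE;
	__asm__ volatile("mcr	p15, 0, %0, c1, c0, 0" : : "r" (sctlr));	/* write it back */
}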