Commit d675d0bc authored by Will Deacon, committed by Catalin Marinas

ARM: LPAE: add ISBs around MMU enabling code

Before we enable the MMU, we must ensure that the TTBR registers contain
sane values. After the MMU has been enabled, we jump to the *virtual*
address of the following function, so we also need to ensure that the
SCTLR write has taken effect.

This patch adds ISB instructions around the SCTLR write to ensure the
visibility of the above.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 8d2cd3a3
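
In outline, the requirement is: barrier, write SCTLR, read it back, barrier, then branch to the virtual address. Below is a minimal ARMv7 sketch of that sequence; the register usage (r0 holding the new SCTLR value, r3 the virtual continuation address) is illustrative only, and the actual code is in the hunks that follow, where instr_sync expands to isb on ARMv7 and to the equivalent CP15 operation on ARMv6.

	@ illustrative only: r0 = new SCTLR value, r3 = virtual continuation address
	isb				@ complete earlier TTBR/DACR writes before enabling the MMU
	mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR (MMU on)
	mrc	p15, 0, r0, c1, c0, 0	@ read it back
	isb				@ ensure the SCTLR write has taken effect
	mov	pc, r3			@ now safe to branch to the virtual address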
@@ -659,6 +659,7 @@ __armv7_mmu_cache_on:
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 #endif
+		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcr	p15, 0, r0, c1, c0, 0	@ load control register
 		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
 		mov	r0, #0
@@ -186,6 +186,17 @@
 #define ALT_UP_B(label) b label
 #endif
 
+/*
+ * Instruction barrier
+ */
+	.macro	instr_sync
+#if __LINUX_ARM_ARCH__ >= 7
+	isb
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c5, 4
+#endif
+	.endm
+
 /*
  * SMP data memory barrier
  */
@@ -401,8 +401,10 @@ ENDPROC(__enable_mmu)
 	.pushsection	.idmap.text, "ax"
 ENTRY(__turn_mmu_on)
 	mov	r0, r0
+	instr_sync
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
+	instr_sync
 	mov	r3, r3
 	mov	r3, r13
 	mov	pc, r3
@@ -57,8 +57,10 @@ ENDPROC(cpu_suspend_abort)
 	.pushsection	.idmap.text,"ax"
 ENTRY(cpu_resume_mmu)
 	ldr	r3, =cpu_resume_after_mmu
+	instr_sync
 	mcr	p15, 0, r0, c1, c0, 0		@ turn on MMU, I-cache, etc
 	mrc	p15, 0, r0, c0, c0, 0		@ read id reg
+	instr_sync
 	mov	r0, r0
 	mov	r0, r0
 	mov	pc, r3			@ jump to virtual address