Commit e6a5d66f authored by Catalin Marinas, committed by Russell King

[ARM] 4129/1: Add barriers after the TLB operations

The architecture specification states that TLB operations are
guaranteed to be complete only after the execution of a DSB (Data
Synchronisation Barrier, formerly Data Write Barrier or Drain Write
Buffer). The branch target cache invalidation is also needed. The ISB
(Instruction Synchronisation Barrier, formerly Prefetch Flush) is
needed unless there will be a return from exception before the
corresponding mapping is used (as is the case for user mappings).
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 9d99df4b
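
For reference, the dsb() and isb() helpers that the C hunks below rely on are CP15 operations on ARMv6 rather than dedicated instructions. A minimal sketch of plausible definitions (for illustration only; these exact macros are not part of this commit):

/* DSB: CP15 c7, c10, 4 (formerly "drain write buffer") */
#define dsb() __asm__ __volatile__ (	\
	"mcr p15, 0, %0, c7, c10, 4"	\
	: : "r" (0) : "memory")

/* ISB: CP15 c7, c5, 4 (formerly "flush prefetch buffer") */
#define isb() __asm__ __volatile__ (	\
	"mcr p15, 0, %0, c7, c5, 4"	\
	: : "r" (0) : "memory")

The "memory" clobber also stops the compiler from reordering memory accesses across the barrier.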
@@ -53,6 +53,8 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
+	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
 	mov	pc, lr
 
 /*
@@ -80,7 +82,9 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
+	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
+	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
 	.section ".text.init", #alloc, #execinstr
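
The two assembly hunks above add the barrier sequence to the ARMv6 TLB range flushes. As a hypothetical standalone C illustration of the ordering this patch enforces for a kernel mapping (the helper name is invented; the CP15 encodings are the ones used in the patch):

/* assumes the dsb()/isb() helpers sketched earlier and <asm/page.h> */
static inline void v6_flush_kern_page_example(unsigned long kaddr)
{
	const int zero = 0;

	dsb();				/* complete earlier page-table writes */
	asm("mcr p15, 0, %0, c8, c7, 1"	/* invalidate unified TLB entry by MVA */
	    : : "r" (kaddr & PAGE_MASK) : "cc");
	asm("mcr p15, 0, %0, c7, c5, 6"	/* invalidate the branch target cache */
	    : : "r" (zero) : "cc");
	dsb();				/* TLB/BTC maintenance completes only after a DSB */
	isb();				/* the kernel mapping may be used immediately */
}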
@@ -247,7 +247,7 @@ static inline void local_flush_tlb_all(void)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (tlb_flag(TLB_V3_FULL))
 		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
@@ -257,6 +257,15 @@ static inline void local_flush_tlb_all(void)
 		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
 	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
 		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
 }
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
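
The same "BTB flush, then DSB (plus ISB for kernel mappings)" tail recurs in the hunks that follow. A hypothetical helper capturing the pattern (not something this patch adds) might look like:

static inline void v6_tlb_barrier(int need_isb)
{
	const int zero = 0;

	/* invalidate the whole branch target cache */
	asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
	dsb();		/* maintenance is complete only after the DSB */
	if (need_isb)
		isb();	/* resynchronise the pipeline */
}

The ISB is skipped in the user-mapping paths (local_flush_tlb_mm, local_flush_tlb_page) because the new mapping is only used after a return from exception, which already resynchronises the pipeline.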
@@ -266,7 +275,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
 		if (tlb_flag(TLB_V3_FULL))
@@ -285,6 +294,14 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
 	if (tlb_flag(TLB_V6_I_ASID))
 		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
 }
 
 static inline void
@@ -296,7 +313,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero));
+		dsb();
 
 	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
 		if (tlb_flag(TLB_V3_PAGE))
@@ -317,6 +334,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
+
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+	}
 }
 
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -327,7 +352,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	kaddr &= PAGE_MASK;
 
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+		dsb();
 
 	if (tlb_flag(TLB_V3_PAGE))
 		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
@@ -347,11 +372,14 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
 
-	/* The ARM ARM states that the completion of a TLB maintenance
-	 * operation is only guaranteed by a DSB instruction
-	 */
-	if (tlb_flag(TLB_V6_U_PAGE | TLB_V6_D_PAGE | TLB_V6_I_PAGE))
-		asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
+		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
+		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
+		/* flush the branch target cache */
+		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
+		dsb();
+		isb();
+	}
 }
 
 /*
@@ -369,15 +397,13 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
  */
 static inline void flush_pmd_entry(pmd_t *pmd)
 {
-	const unsigned int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	if (tlb_flag(TLB_DCLEAN))
 		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
 			: : "r" (pmd) : "cc");
 	if (tlb_flag(TLB_WB))
-		asm("mcr p15, 0, %0, c7, c10, 4	@ flush_pmd"
-			: : "r" (zero) : "cc");
+		dsb();
 }
 
 static inline void clean_pmd_entry(pmd_t *pmd)
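
With the change above, flush_pmd_entry() ends in dsb() whenever the CPU has a write buffer. A sketch of the typical caller pattern (hypothetical name; modelled on how set_pmd-style updates pair a descriptor write with this flush):

static inline void update_pmd_example(pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;		/* write the first-level descriptor */
	flush_pmd_entry(pmdp);	/* clean the D-cache line holding it, then
				 * dsb() so the hardware page-table walk
				 * observes the new entry */
}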