Commit 3b756ccd authored by Guo Ren

csky: Fix TLB maintenance synchronization problem

TLB invalidation on csky CPUs did not include a barrier operation, so
we need to prevent a previous PTW (page table walk) response from
arriving after the TLB invalidation instruction. Of course, changing
the ASID also needs to take care of this issue.

CPU0                    CPU1
===============         ===============
set_pte
sync_is()        ->     See the previous set_pte for all harts
tlbi.vas         ->     Invalidate all harts TLB entry & flush pipeline
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
parent c109f424
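
All of the hunks below converge on one ordering rule, condensed here
into a single illustrative helper. This sketch is ours, not a hunk from
the patch: the function name is invented, and we assume sync_is() in
the hunks expands to the csky sync.is instruction as in the arch's
barrier code.

	/*
	 * Minimal sketch (illustrative only) of the ordering this commit
	 * enforces for a broadcast TLB invalidation on CONFIG_CPU_HAS_TLBI
	 * parts: sync.is first, so every hart sees the preceding set_pte;
	 * sync.i after tlbi.vas, so the local pipeline holds no stale
	 * translation from an earlier PTW.
	 */
	static inline void example_invalidate_page(unsigned long addr,
						   unsigned long asid)
	{
		asm volatile("sync.is\n" : : : "memory");
		asm volatile(
			"tlbi.vas %0	\n"
			"sync.i		\n"
			:
			: "r" (addr | asid)
			: "memory");
	}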
@@ -89,9 +89,10 @@ static inline void tlb_invalid_indexed(void)
 	cpwcr("cpcr8", 0x02000000);
 }
 
-static inline void setup_pgd(pgd_t *pgd)
+static inline void setup_pgd(pgd_t *pgd, int asid)
 {
 	cpwcr("cpcr29", __pa(pgd) | BIT(0));
+	write_mmu_entryhi(asid);
 }
 
 static inline pgd_t *get_pgd(void)
...
@@ -78,8 +78,13 @@ static inline void tlb_read(void)
 static inline void tlb_invalid_all(void)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-	asm volatile("tlbi.alls\n":::"memory");
 	sync_is();
+	asm volatile(
+		"tlbi.alls	\n"
+		"sync.i		\n"
+		:
+		:
+		: "memory");
 #else
 	mtcr("cr<8, 15>", 0x04000000);
 #endif
@@ -88,8 +93,13 @@ static inline void tlb_invalid_all(void)
 static inline void local_tlb_invalid_all(void)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-	asm volatile("tlbi.all\n":::"memory");
 	sync_is();
+	asm volatile(
+		"tlbi.all	\n"
+		"sync.i		\n"
+		:
+		:
+		: "memory");
 #else
 	tlb_invalid_all();
 #endif
@@ -100,12 +110,27 @@ static inline void tlb_invalid_indexed(void)
 	mtcr("cr<8, 15>", 0x02000000);
 }
 
-static inline void setup_pgd(pgd_t *pgd)
+#define NOP32 ".long 0x4820c400\n"
+
+static inline void setup_pgd(pgd_t *pgd, int asid)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-	mtcr("cr<28, 15>", __pa(pgd) | BIT(0));
+	sync_is();
+#else
+	mb();
+#endif
+	asm volatile(
+#ifdef CONFIG_CPU_HAS_TLBI
+		"mtcr %1, cr<28, 15>	\n"
 #endif
-	mtcr("cr<29, 15>", __pa(pgd) | BIT(0));
+		"mtcr %1, cr<29, 15>	\n"
+		"mtcr %0, cr< 4, 15>	\n"
+		".rept 64		\n"
+		NOP32
+		".endr			\n"
+		:
+		:"r"(asid), "r"(__pa(pgd) | BIT(0))
+		:"memory");
 }
 
 static inline pgd_t *get_pgd(void)
...
@@ -30,8 +30,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (prev != next)
 		check_and_switch_context(next, cpu);
 
-	setup_pgd(next->pgd);
-	write_mmu_entryhi(next->context.asid.counter);
+	setup_pgd(next->pgd, next->context.asid.counter);
 
 	flush_icache_deferred(next);
 }
...
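
The switch_mm() change above is the caller-side half of the new
setup_pgd() signature. A condensed before/after view of the call site
follows; the commentary is our reading of the commit message, not
patch text:

	/* Before: PGD and ASID were written in two separate steps, so a
	 * PTW issued in the window could pair the new PGD with the old
	 * ASID (or the reverse): */
	setup_pgd(next->pgd);
	write_mmu_entryhi(next->context.asid.counter);

	/* After: one call writes both MMU registers back-to-back behind
	 * a single barrier (sync_is() on TLBI-capable parts, mb()
	 * otherwise), closing that window: */
	setup_pgd(next->pgd, next->context.asid.counter);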
@@ -164,7 +164,7 @@ void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
 	/* Setup page mask to 4k */
 	write_mmu_pagemask(0);
 
-	setup_pgd(swapper_pg_dir);
+	setup_pgd(swapper_pg_dir, 0);
 }
 
 void __init fixrange_init(unsigned long start, unsigned long end,
...
@@ -24,7 +24,13 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
 #ifdef CONFIG_CPU_HAS_TLBI
-	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+	sync_is();
+	asm volatile(
+		"tlbi.asids %0	\n"
+		"sync.i		\n"
+		:
+		: "r" (cpu_asid(mm))
+		: "memory");
 #else
 	tlb_invalid_all();
 #endif
@@ -53,11 +59,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	end   &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
+	sync_is();
 	while (start < end) {
-		asm volatile("tlbi.vas %0"::"r"(start | newpid));
+		asm volatile(
+			"tlbi.vas %0	\n"
+			:
+			: "r" (start | newpid)
+			: "memory");
+
 		start += 2*PAGE_SIZE;
 	}
-	sync_is();
+	asm volatile("sync.i\n");
 #else
 	{
 	unsigned long flags, oldpid;
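
Note the shape of the range flush differs from the single-page paths:
sync_is() is hoisted ahead of the loop and one sync.i follows it, so a
batch of invalidations pays for a single pipeline flush instead of one
per tlbi.vas. Condensed into a sketch (ours, not patch text):

	sync_is();			/* publish prior set_pte's once  */
	for (; start < end; start += 2*PAGE_SIZE)
		asm volatile("tlbi.vas %0\n"
			     : : "r" (start | newpid) : "memory");
	asm volatile("sync.i\n");	/* one flush for the whole batch */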
@@ -87,11 +99,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	end   &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
+	sync_is();
 	while (start < end) {
-		asm volatile("tlbi.vaas %0"::"r"(start));
+		asm volatile(
+			"tlbi.vaas %0	\n"
+			:
+			: "r" (start)
+			: "memory");
+
 		start += 2*PAGE_SIZE;
 	}
-	sync_is();
+	asm volatile("sync.i\n");
 #else
 	{
 	unsigned long flags, oldpid;
@@ -121,8 +139,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	addr &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
-	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
 	sync_is();
+	asm volatile(
+		"tlbi.vas %0	\n"
+		"sync.i		\n"
+		:
+		: "r" (addr | newpid)
+		: "memory");
 #else
 	{
 	int oldpid, idx;
@@ -147,8 +170,13 @@ void flush_tlb_one(unsigned long addr)
 	addr &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
-	asm volatile("tlbi.vaas %0"::"r"(addr));
 	sync_is();
+	asm volatile(
+		"tlbi.vaas %0	\n"
+		"sync.i		\n"
+		:
+		: "r" (addr)
+		: "memory");
 #else
 	{
 	int oldpid, idx;
...