Commit ace8cb75 authored by Will Deacon, committed by Catalin Marinas

arm64: tlb: Avoid synchronous TLBIs when freeing page tables

By selecting HAVE_RCU_TABLE_INVALIDATE, we can rely on tlb_flush() being
called if we fail to batch table pages for freeing. This in turn allows
us to postpone walk-cache invalidation until tlb_finish_mmu(), which
avoids lots of unnecessary DSBs and means we can shoot down the ASID if
the range is large enough.
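
For context, the generic mmu_gather code behaves roughly as follows when batching a table page for RCU freeing (a simplified sketch paraphrasing mm/memory.c around this release, not the verbatim source): if the batch page cannot be allocated, the table must be freed synchronously, and selecting HAVE_RCU_TABLE_INVALIDATE guarantees that the architecture's tlb_flush() runs first, which is what lets the per-level __p?d_free_tlb() hooks below drop their own synchronous invalidation.

	void tlb_remove_table(struct mmu_gather *tlb, void *table)
	{
		struct mmu_table_batch **batch = &tlb->batch;

		if (*batch == NULL) {
			*batch = (struct mmu_table_batch *)
					__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
			if (*batch == NULL) {
				/*
				 * Batching failed: invalidate the walk caches
				 * via the architecture's tlb_flush() (the
				 * effect of HAVE_RCU_TABLE_INVALIDATE), then
				 * free the table page after synchronising
				 * with any concurrent lockless walkers.
				 */
				tlb_table_invalidate(tlb);
				tlb_remove_table_one(table);
				return;
			}
			(*batch)->nr = 0;
		}

		/* Queue the table page; flush the batch when it fills up. */
		(*batch)->tables[(*batch)->nr++] = table;
		if ((*batch)->nr == MAX_TABLE_BATCH)
			tlb_table_flush(tlb);
	}
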
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent f270ab88
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -143,6 +143,7 @@ config ARM64
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RCU_TABLE_FREE
+	select HAVE_RCU_TABLE_INVALIDATE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -54,7 +54,6 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
-	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_table(tlb, pte);
 }
@@ -63,7 +62,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
-	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_table(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -72,7 +70,6 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
-	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_table(tlb, virt_to_page(pudp));
 }
 #endif
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -215,17 +215,6 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
  */
-static inline void __flush_tlb_pgtable(struct mm_struct *mm,
-				       unsigned long uaddr)
-{
-	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
-
-	dsb(ishst);
-	__tlbi(vae1is, addr);
-	__tlbi_user(vae1is, addr);
-	dsb(ish);
-}
-
 static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 {
 	unsigned long addr = __TLBI_VADDR(kaddr, 0);
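
With __flush_tlb_pgtable() removed, walk-cache invalidation for user page tables is left entirely to tlb_flush(), as reworked by the parent commit f270ab88. A sketch of that function as it stands after this series (paraphrased, not verbatim): when tlb->freed_tables is set, a non-last-level invalidation is issued, and a full-mm teardown shoots down the whole ASID instead of walking the range.

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
		bool last_level = !tlb->freed_tables;
		unsigned long stride = tlb_get_unmap_size(tlb);

		/*
		 * Tearing down the whole address space: a single ASID-wide
		 * invalidation covers the walk cache too, so only issue it
		 * if table pages were actually freed.
		 */
		if (tlb->fullmm) {
			if (!last_level)
				flush_tlb_mm(tlb->mm);
			return;
		}

		/* Ranged invalidation, non-last-level if tables were freed. */
		__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
	}
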