Commit 4150e50b authored by Catalin Marinas, committed by Will Deacon

arm64: Use last level TLBI for user pte changes

The flush_tlb_page() function is used on user address ranges when PTEs
(or PMDs/PUDs for huge pages) are changed (attribute updates or
clearing). In such cases, it is more efficient to invalidate only the
last level of the TLB with the "tlbi vale1is" instruction.

In the TLB shoot-down case, the TLB caching of the intermediate page
table levels (pmd, pud, pgd) is handled by __flush_tlb_pgtable() via the
__(pte|pmd|pud)_free_tlb() functions and is not deferred to
tlb_finish_mmu() (as of commit 285994a6, "arm64: Invalidate the TLB
corresponding to intermediate page table levels"). The tlb_flush()
function therefore only needs to invalidate the TLB for the last level
of the page tables; the __flush_tlb_range() function gains a fourth
argument to request a last-level TLBI.
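
For reference, the __flush_tlb_pgtable() helper mentioned above looks
roughly like the sketch below (an approximation based on the description
of commit 285994a6, not a verbatim copy). Note that it deliberately uses
the non-leaf "tlbi vae1is" form, since the entry being invalidated caches
an intermediate table level rather than a leaf:

	/* Approximate sketch of __flush_tlb_pgtable() (see commit 285994a6). */
	static inline void __flush_tlb_pgtable(struct mm_struct *mm,
					       unsigned long uaddr)
	{
		unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);

		dsb(ishst);
		asm("tlbi vae1is, %0" : : "r" (addr));	/* non-leaf: a table entry changed */
		dsb(ish);
	}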
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent da4e7330
@@ -41,7 +41,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 		flush_tlb_mm(tlb->mm);
 	} else {
 		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-		flush_tlb_range(&vma, tlb->start, tlb->end);
+		/*
+		 * The intermediate page table levels are already handled by
+		 * the __(pte|pmd|pud)_free_tlb() functions, so last level
+		 * TLBI is sufficient here.
+		 */
+		__flush_tlb_range(&vma, tlb->start, tlb->end, true);
 	}
 }
...
@@ -87,7 +87,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 		((unsigned long)ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
-	asm("tlbi vae1is, %0" : : "r" (addr));
+	asm("tlbi vale1is, %0" : : "r" (addr));
 	dsb(ish);
 }
@@ -97,8 +97,9 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
  */
 #define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end,
+				     bool last_level)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
@@ -112,11 +113,21 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	end = asid | (end >> 12);
 
 	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vae1is, %0" : : "r"(addr));
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+		if (last_level)
+			asm("tlbi vale1is, %0" : : "r"(addr));
+		else
+			asm("tlbi vae1is, %0" : : "r"(addr));
+	}
 	dsb(ish);
 }
 
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	__flush_tlb_range(vma, start, end, false);
+}
+
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
...
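
To illustrate the intended split after this patch (a hypothetical caller,
not part of the commit): code that has only changed leaf PTE attributes
can pass last_level == true, while generic callers keep the conservative
behaviour through the flush_tlb_range() wrapper:

	/* Hypothetical caller, for illustration only. */
	static void example_change_pte_attrs(struct vm_area_struct *vma,
					     unsigned long start, unsigned long end)
	{
		/* ... update PTE attributes for [start, end) here ... */

		/*
		 * Only leaf entries changed, so a last-level TLBI suffices.
		 * Callers that may have freed page tables rely on
		 * __(pte|pmd|pud)_free_tlb() for the intermediate levels.
		 */
		__flush_tlb_range(vma, start, end, true);
	}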