Commit 91ec450f authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/32s: Split and inline flush_range()

flush_range() handles both the MMU_FTR_HPTE_TABLE case and
the other case.

The non MMU_FTR_HPTE_TABLE case is trivial as it is only a call
to _tlbie()/_tlbia() which is not worth a dedicated function.

Make flush_range() a hash-specific function and call it from tlbflush.h
based on mmu_has_feature(MMU_FTR_HPTE_TABLE).
Signed-off-by: default avatarChristophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/132ab19aae52abc8e06ab524ec86d4229b5b9c3d.1603348103.git.christophe.leroy@csgroup.eu
parent 1e83396f
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
*/ */
void hash__flush_tlb_mm(struct mm_struct *mm); void hash__flush_tlb_mm(struct mm_struct *mm);
void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end); void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
void _tlbie(unsigned long address); void _tlbie(unsigned long address);
...@@ -20,6 +20,17 @@ static inline void _tlbie(unsigned long address) ...@@ -20,6 +20,17 @@ static inline void _tlbie(unsigned long address)
#endif #endif
void _tlbia(void); void _tlbia(void);
/*
 * Flush the TLB entries covering [start, end) for @mm.
 * With a hash page table, defer to the hash-specific range flush;
 * otherwise flush a single entry when the range fits in one page,
 * or the whole TLB for larger ranges.
 */
static inline void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		hash__flush_range(mm, start, end);
		return;
	}

	/* Non-hash: a one-page range gets a targeted tlbie, else flush all. */
	if (end - start <= PAGE_SIZE)
		_tlbie(start);
	else
		_tlbia();
}
static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_mm(struct mm_struct *mm)
{ {
if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
......
...@@ -76,7 +76,7 @@ void tlb_flush(struct mmu_gather *tlb) ...@@ -76,7 +76,7 @@ void tlb_flush(struct mmu_gather *tlb)
* and check _PAGE_HASHPTE bit; if it is set, find and destroy * and check _PAGE_HASHPTE bit; if it is set, find and destroy
* the corresponding HPTE. * the corresponding HPTE.
*/ */
void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end) void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{ {
pmd_t *pmd; pmd_t *pmd;
unsigned long pmd_end; unsigned long pmd_end;
...@@ -84,13 +84,6 @@ void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end) ...@@ -84,13 +84,6 @@ void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
unsigned int ctx = mm->context.id; unsigned int ctx = mm->context.id;
start &= PAGE_MASK; start &= PAGE_MASK;
if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
if (end - start <= PAGE_SIZE)
_tlbie(start);
else
_tlbia();
return;
}
if (start >= end) if (start >= end)
return; return;
end = (end - 1) | ~PAGE_MASK; end = (end - 1) | ~PAGE_MASK;
...@@ -109,7 +102,7 @@ void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end) ...@@ -109,7 +102,7 @@ void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
++pmd; ++pmd;
} }
} }
EXPORT_SYMBOL(flush_range); EXPORT_SYMBOL(hash__flush_range);
/* /*
* Flush all the (user) entries for the address space described by mm. * Flush all the (user) entries for the address space described by mm.
...@@ -125,7 +118,7 @@ void hash__flush_tlb_mm(struct mm_struct *mm) ...@@ -125,7 +118,7 @@ void hash__flush_tlb_mm(struct mm_struct *mm)
* but it seems dup_mmap is the only SMP case which gets here. * but it seems dup_mmap is the only SMP case which gets here.
*/ */
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
} }
EXPORT_SYMBOL(hash__flush_tlb_mm); EXPORT_SYMBOL(hash__flush_tlb_mm);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment