Commit a42d6ba8 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/book3s64/radix: Remove unused code.

The mm_tlb_flush_nested() change was added to the mmu_gather TLB flush
path to handle the case of parallel PTE invalidates happening with
mmap_sem held in read mode. This fix was done by commit
02390f66 ("powerpc/64s/radix: Fix MADV_[FREE|DONTNEED] TLB flush
miss problem with THP") and the problem is explained in detail in
commit 99baac21 ("mm: fix MADV_[FREE|DONTNEED] TLB flush miss
problem").

This was later updated by commit 7a30df49 ("mm: mmu_gather: remove
__tlb_reset_range() for force flush") to do a full mm flush rather
than a range flush. Since commit dd2283f2 ("mm: mmap: zap pages with
read mmap_sem in munmap") we also allow a page table free with
mmap_sem held in read mode, which means we should do a PWC flush too.
Our current full mm flush already implies a PWC flush.
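
As a simplified illustration (a sketch only: the real tlbie/tlbiel
sequences are replaced by a printout, and only the RIC_FLUSH_* values
follow the radix TLB code), the full mm flush issues a single PID-wide
invalidate with RIC_FLUSH_ALL, which covers the page walk cache as
well, so no separate PWC flush is needed after a page table free.

/*
 * Sketch only: real flush routines replaced by a printout.
 */
#include <stdio.h>

enum ric {
	RIC_FLUSH_TLB = 0,	/* TLB entries only */
	RIC_FLUSH_PWC = 1,	/* page walk cache only */
	RIC_FLUSH_ALL = 2,	/* TLB entries and page walk cache */
};

static void flush_pid(unsigned long pid, enum ric ric)
{
	printf("flush PID %lu: %s\n", pid,
	       ric == RIC_FLUSH_ALL ? "TLB + PWC" :
	       ric == RIC_FLUSH_PWC ? "PWC only" : "TLB only");
}

int main(void)
{
	/* tlb->fullmm: one PID-wide invalidate, PWC included */
	flush_pid(1, RIC_FLUSH_ALL);
	return 0;
}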

With all the above changes, the mm_tlb_flush_nested(mm) branch in
radix__tlb_flush() will never be taken, because for the nested case we
would already have taken the if (tlb->fullmm) branch. This patch
removes that unused code. It also removes the gflush changes in
__radix__flush_tlb_range() that were added to handle the range TLB
flush for that case. We only check for THP there because hugetlb pages
are flushed via a different code path where the page size is specified
explicitly.
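
A condensed sketch of the resulting control flow (struct and field
names are shortened stand-ins, helpers are replaced by printouts):
once the forced flush for a nested invalidate is a full mm flush,
tlb->fullmm is already set and the mm_tlb_flush_nested() branch can
never be reached.

/*
 * Sketch only: not the kernel function, just its branch ordering.
 */
#include <stdbool.h>
#include <stdio.h>

struct gather {
	bool fullmm;	/* forced flush sets this since 7a30df49 */
	bool nested;	/* stand-in for mm_tlb_flush_nested(mm) */
};

static void tlb_flush(struct gather *tlb)
{
	if (tlb->fullmm)
		puts("__flush_all_mm(): whole PID, TLB + PWC");
	else if (tlb->nested)
		puts("dead code: fullmm already covered this case");
	else
		puts("normal range flush");
}

int main(void)
{
	struct gather nested_unmap = { .fullmm = true, .nested = true };

	tlb_flush(&nested_unmap);	/* takes the fullmm branch */
	return 0;
}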

This is a partial revert of commit 02390f66 ("powerpc/64s/radix:
Fix MADV_[FREE|DONTNEED] TLB flush miss problem with THP")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191024075801.22434-1-aneesh.kumar@linux.ibm.com
parent 8e6b6da9
@@ -832,8 +832,7 @@ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
 
 static inline void __radix__flush_tlb_range(struct mm_struct *mm,
-					unsigned long start, unsigned long end,
-					bool flush_all_sizes)
+					unsigned long start, unsigned long end)
 {
 	unsigned long pid;
@@ -879,26 +878,16 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 			}
 		}
 	} else {
-		bool hflush = flush_all_sizes;
-		bool gflush = flush_all_sizes;
+		bool hflush = false;
 		unsigned long hstart, hend;
-		unsigned long gstart, gend;
 
-		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
-			hflush = true;
-
-		if (hflush) {
+		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
 			hend = end & PMD_MASK;
 			if (hstart == hend)
 				hflush = false;
-		}
-
-		if (gflush) {
-			gstart = (start + PUD_SIZE - 1) & PUD_MASK;
-			gend = end & PUD_MASK;
-			if (gstart == gend)
-				gflush = false;
+			else
+				hflush = true;
 		}
 
 		if (local) {
@@ -907,9 +896,6 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 			if (hflush)
 				__tlbiel_va_range(hstart, hend, pid,
 						PMD_SIZE, MMU_PAGE_2M);
-			if (gflush)
-				__tlbiel_va_range(gstart, gend, pid,
-						PUD_SIZE, MMU_PAGE_1G);
 			asm volatile("ptesync": : :"memory");
 		} else if (cputlb_use_tlbie()) {
 			asm volatile("ptesync": : :"memory");
@@ -917,10 +903,6 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 			if (hflush)
 				__tlbie_va_range(hstart, hend, pid,
 						PMD_SIZE, MMU_PAGE_2M);
-			if (gflush)
-				__tlbie_va_range(gstart, gend, pid,
-						PUD_SIZE, MMU_PAGE_1G);
-
 			asm volatile("eieio; tlbsync; ptesync": : :"memory");
 		} else {
 			_tlbiel_va_range_multicast(mm,
@@ -928,9 +910,6 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
 			if (hflush)
 				_tlbiel_va_range_multicast(mm,
 					hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
-			if (gflush)
-				_tlbiel_va_range_multicast(mm,
-					gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G, false);
 		}
 	}
 	preempt_enable();
@@ -945,7 +924,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		return radix__flush_hugetlb_tlb_range(vma, start, end);
 #endif
 
-	__radix__flush_tlb_range(vma->vm_mm, start, end, false);
+	__radix__flush_tlb_range(vma->vm_mm, start, end);
 }
 EXPORT_SYMBOL(radix__flush_tlb_range);
@@ -1023,39 +1002,6 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 	 */
 	if (tlb->fullmm) {
 		__flush_all_mm(mm, true);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
-	} else if (mm_tlb_flush_nested(mm)) {
-		/*
-		 * If there is a concurrent invalidation that is clearing ptes,
-		 * then it's possible this invalidation will miss one of those
-		 * cleared ptes and miss flushing the TLB. If this invalidate
-		 * returns before the other one flushes TLBs, that can result
-		 * in it returning while there are still valid TLBs inside the
-		 * range to be invalidated.
-		 *
-		 * See mm/memory.c:tlb_finish_mmu() for more details.
-		 *
-		 * The solution to this is ensure the entire range is always
-		 * flushed here. The problem for powerpc is that the flushes
-		 * are page size specific, so this "forced flush" would not
-		 * do the right thing if there are a mix of page sizes in
-		 * the range to be invalidated. So use __flush_tlb_range
-		 * which invalidates all possible page sizes in the range.
-		 *
-		 * PWC flush probably is not be required because the core code
-		 * shouldn't free page tables in this path, but accounting
-		 * for the possibility makes us a bit more robust.
-		 *
-		 * need_flush_all is an uncommon case because page table
-		 * teardown should be done with exclusive locks held (but
-		 * after locks are dropped another invalidate could come
-		 * in), it could be optimized further if necessary.
-		 */
-		if (!tlb->need_flush_all)
-			__radix__flush_tlb_range(mm, start, end, true);
-		else
-			radix__flush_all_mm(mm);
-#endif
 	} else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
 		if (!tlb->need_flush_all)
 			radix__flush_tlb_mm(mm);