Commit 3c9ac2bc authored by Michael Ellerman

powerpc/mm/radix: Drop support for CPUs without lockless tlbie

Currently the radix TLB code includes support for CPUs that do *not*
have MMU_FTR_LOCKLESS_TLBIE. On those CPUs we are required to take a
global spinlock before issuing a tlbie.

Radix can only be built for 64-bit Book3s CPUs, and of those, only
POWER4, 970, Cell and PA6T do not have MMU_FTR_LOCKLESS_TLBIE. Although
it's possible to build a kernel with Radix support that can also boot on
those CPUs, we happen to know that in reality none of those CPUs support
the Radix MMU, so the code can never actually run on those CPUs.

So remove the native_tlbie_lock in the Radix TLB code.

Note that there is another lock of the same name in the hash code, which
is unaffected by this patch.
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent d93b0ac0
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
#define RIC_FLUSH_TLB 0 #define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1 #define RIC_FLUSH_PWC 1
...@@ -203,15 +202,9 @@ void radix__flush_tlb_mm(struct mm_struct *mm) ...@@ -203,15 +202,9 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT)) if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context; goto no_context;
if (!mm_is_thread_local(mm)) { if (!mm_is_thread_local(mm))
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
_tlbie_pid(pid, RIC_FLUSH_ALL); _tlbie_pid(pid, RIC_FLUSH_ALL);
if (lock_tlbie) else
raw_spin_unlock(&native_tlbie_lock);
} else
_tlbiel_pid(pid, RIC_FLUSH_ALL); _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context: no_context:
preempt_enable(); preempt_enable();
...@@ -235,15 +228,9 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) ...@@ -235,15 +228,9 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
if (unlikely(pid == MMU_NO_CONTEXT)) if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context; goto no_context;
if (!mm_is_thread_local(mm)) { if (!mm_is_thread_local(mm))
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
_tlbie_pid(pid, RIC_FLUSH_PWC); _tlbie_pid(pid, RIC_FLUSH_PWC);
if (lock_tlbie) else
raw_spin_unlock(&native_tlbie_lock);
} else
tlbiel_pwc(pid); tlbiel_pwc(pid);
no_context: no_context:
preempt_enable(); preempt_enable();
...@@ -260,15 +247,9 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr, ...@@ -260,15 +247,9 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0; pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT)) if (unlikely(pid == MMU_NO_CONTEXT))
goto bail; goto bail;
if (!mm_is_thread_local(mm)) { if (!mm_is_thread_local(mm))
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB); _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
if (lock_tlbie) else
raw_spin_unlock(&native_tlbie_lock);
} else
_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB); _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail: bail:
preempt_enable(); preempt_enable();
...@@ -289,13 +270,7 @@ EXPORT_SYMBOL(radix__flush_tlb_page); ...@@ -289,13 +270,7 @@ EXPORT_SYMBOL(radix__flush_tlb_page);
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end) void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{ {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
_tlbie_pid(0, RIC_FLUSH_ALL); _tlbie_pid(0, RIC_FLUSH_ALL);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
} }
EXPORT_SYMBOL(radix__flush_tlb_kernel_range); EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
...@@ -357,7 +332,6 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, ...@@ -357,7 +332,6 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long addr; unsigned long addr;
int local = mm_is_thread_local(mm); int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize); unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift; unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
...@@ -378,13 +352,8 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start, ...@@ -378,13 +352,8 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
if (local) if (local)
_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB); _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
else { else
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB); _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock);
}
} }
err_out: err_out:
preempt_enable(); preempt_enable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment