Commit 36194812 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/radix: Update to tlb functions ric argument

Radix invalidate control (RIC) is used to control which cache to flush
using tlb instructions. When doing a PID flush, we currently flush
everything including page walk cache. For address range flush, we flush
only the TLB. In the next patch, we add support for flushing only the
page walk cache.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 8017ea35
...@@ -18,16 +18,20 @@ ...@@ -18,16 +18,20 @@
static DEFINE_RAW_SPINLOCK(native_tlbie_lock); static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbiel_pid(unsigned long pid, int set) #define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
static inline void __tlbiel_pid(unsigned long pid, int set,
unsigned long ric)
{ {
unsigned long rb,rs,ric,prs,r; unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */ rb = PPC_BIT(53); /* IS = 1 */
rb |= set << PPC_BITLSHIFT(51); rb |= set << PPC_BITLSHIFT(51);
rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */ prs = 1; /* process scoped */
r = 1; /* raidx format */ r = 1; /* raidx format */
ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory"); asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
...@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set) ...@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
/* /*
* We use 128 set in radix mode and 256 set in hpt mode. * We use 128 set in radix mode and 256 set in hpt mode.
*/ */
static inline void _tlbiel_pid(unsigned long pid) static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{ {
int set; int set;
for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
__tlbiel_pid(pid, set); __tlbiel_pid(pid, set, ric);
} }
return; return;
} }
static inline void _tlbie_pid(unsigned long pid) static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{ {
unsigned long rb,rs,ric,prs,r; unsigned long rb,rs,prs,r;
rb = PPC_BIT(53); /* IS = 1 */ rb = PPC_BIT(53); /* IS = 1 */
rs = pid << PPC_BITLSHIFT(31); rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */ prs = 1; /* process scoped */
r = 1; /* raidx format */ r = 1; /* raidx format */
ric = 2; /* invalidate all the caches */
asm volatile("ptesync": : :"memory"); asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
...@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid) ...@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
} }
static inline void _tlbiel_va(unsigned long va, unsigned long pid, static inline void _tlbiel_va(unsigned long va, unsigned long pid,
unsigned long ap) unsigned long ap, unsigned long ric)
{ {
unsigned long rb,rs,ric,prs,r; unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63)); rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58); rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31); rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */ prs = 1; /* process scoped */
r = 1; /* raidx format */ r = 1; /* raidx format */
ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory"); asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
...@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid, ...@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
} }
static inline void _tlbie_va(unsigned long va, unsigned long pid, static inline void _tlbie_va(unsigned long va, unsigned long pid,
unsigned long ap) unsigned long ap, unsigned long ric)
{ {
unsigned long rb,rs,ric,prs,r; unsigned long rb,rs,prs,r;
rb = va & ~(PPC_BITMASK(52, 63)); rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58); rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31); rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */ prs = 1; /* process scoped */
r = 1; /* raidx format */ r = 1; /* raidx format */
ric = 0; /* no cluster flush yet */
asm volatile("ptesync": : :"memory"); asm volatile("ptesync": : :"memory");
asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
...@@ -122,7 +123,7 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm) ...@@ -122,7 +123,7 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
preempt_disable(); preempt_disable();
pid = mm->context.id; pid = mm->context.id;
if (pid != MMU_NO_CONTEXT) if (pid != MMU_NO_CONTEXT)
_tlbiel_pid(pid); _tlbiel_pid(pid, RIC_FLUSH_ALL);
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(radix__local_flush_tlb_mm); EXPORT_SYMBOL(radix__local_flush_tlb_mm);
...@@ -135,7 +136,7 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, ...@@ -135,7 +136,7 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
preempt_disable(); preempt_disable();
pid = mm ? mm->context.id : 0; pid = mm ? mm->context.id : 0;
if (pid != MMU_NO_CONTEXT) if (pid != MMU_NO_CONTEXT)
_tlbiel_va(vmaddr, pid, ap); _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
preempt_enable(); preempt_enable();
} }
...@@ -172,11 +173,11 @@ void radix__flush_tlb_mm(struct mm_struct *mm) ...@@ -172,11 +173,11 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (lock_tlbie) if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock); raw_spin_lock(&native_tlbie_lock);
_tlbie_pid(pid); _tlbie_pid(pid, RIC_FLUSH_ALL);
if (lock_tlbie) if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock); raw_spin_unlock(&native_tlbie_lock);
} else } else
_tlbiel_pid(pid); _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context: no_context:
preempt_enable(); preempt_enable();
} }
...@@ -196,11 +197,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, ...@@ -196,11 +197,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
if (lock_tlbie) if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock); raw_spin_lock(&native_tlbie_lock);
_tlbie_va(vmaddr, pid, ap); _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
if (lock_tlbie) if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock); raw_spin_unlock(&native_tlbie_lock);
} else } else
_tlbiel_va(vmaddr, pid, ap); _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail: bail:
preempt_enable(); preempt_enable();
} }
...@@ -224,7 +225,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end) ...@@ -224,7 +225,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
if (lock_tlbie) if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock); raw_spin_lock(&native_tlbie_lock);
_tlbie_pid(0); _tlbie_pid(0, RIC_FLUSH_ALL);
if (lock_tlbie) if (lock_tlbie)
raw_spin_unlock(&native_tlbie_lock); raw_spin_unlock(&native_tlbie_lock);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment