Commit 7ef1c5d1 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: Clean up per cpu usage in segment table code.

From: Anton Blanchard <anton@samba.org>

Clean up per cpu usage in segment table code.
parent 0aec9814
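
In short, the patch replaces the old pattern of taking the address of a per-cpu variable (offset = &__get_cpu_var(stab_cache_ptr); ... (*offset)++;) with copying the value into a local, updating the local, and writing it back once. It also makes the counter saturate at NR_STAB_CACHE_ENTRIES+1 when the cache overflows, instead of incrementing without bound, which is what the offset <= NR_STAB_CACHE_ENTRIES checks in the flush paths rely on. A sketch of the resulting idiom appears after the diff below.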
@@ -143,7 +143,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 static inline void __ste_allocate(unsigned long esid, unsigned long vsid)
 {
 	unsigned char stab_entry;
-	unsigned long *offset;
+	unsigned long offset;
 	int region_id = REGION_ID(esid << SID_SHIFT);
 
 	stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);
@@ -151,11 +151,12 @@ static inline void __ste_allocate(unsigned long esid, unsigned long vsid)
 	if (region_id != USER_REGION_ID)
 		return;
 
-	offset = &__get_cpu_var(stab_cache_ptr);
-	if (*offset < NR_STAB_CACHE_ENTRIES) {
-		__get_cpu_var(stab_cache[*offset]) = stab_entry;
-	}
-	(*offset)++;
+	offset = __get_cpu_var(stab_cache_ptr);
+	if (offset < NR_STAB_CACHE_ENTRIES)
+		__get_cpu_var(stab_cache[offset++]) = stab_entry;
+	else
+		offset = NR_STAB_CACHE_ENTRIES+1;
+	__get_cpu_var(stab_cache_ptr) = offset;
 }
 
 /*
@@ -241,15 +242,15 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
 	STE *stab = (STE *) get_paca()->xStab_data.virt;
 	STE *ste;
-	unsigned long *offset = &__get_cpu_var(stab_cache_ptr);
+	unsigned long offset = __get_cpu_var(stab_cache_ptr);
 
 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");
 
-	if (*offset <= NR_STAB_CACHE_ENTRIES) {
+	if (offset <= NR_STAB_CACHE_ENTRIES) {
 		int i;
 
-		for (i = 0; i < *offset; i++) {
+		for (i = 0; i < offset; i++) {
 			ste = stab + __get_cpu_var(stab_cache[i]);
 			ste->dw0.dw0.v = 0;
 		}
@@ -274,7 +275,7 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 
 	asm volatile("sync; slbia; sync":::"memory");
 
-	*offset = 0;
+	__get_cpu_var(stab_cache_ptr) = 0;
 
 	preload_stab(tsk, mm);
 }
@@ -357,7 +358,7 @@ static inline void __slb_allocate(unsigned long esid, unsigned long vsid,
 {
 	int large = 0;
 	int region_id = REGION_ID(esid << SID_SHIFT);
-	unsigned long *offset;
+	unsigned long offset;
 
 	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) {
 		if (region_id == KERNEL_REGION_ID)
@@ -371,11 +372,12 @@ static inline void __slb_allocate(unsigned long esid, unsigned long vsid,
 	if (region_id != USER_REGION_ID)
 		return;
 
-	offset = &__get_cpu_var(stab_cache_ptr);
-	if (*offset < NR_STAB_CACHE_ENTRIES) {
-		__get_cpu_var(stab_cache[*offset]) = esid;
-	}
-	(*offset)++;
+	offset = __get_cpu_var(stab_cache_ptr);
+	if (offset < NR_STAB_CACHE_ENTRIES)
+		__get_cpu_var(stab_cache[offset++]) = esid;
+	else
+		offset = NR_STAB_CACHE_ENTRIES+1;
+	__get_cpu_var(stab_cache_ptr) = offset;
 }
 
 /*
@@ -454,9 +456,9 @@ static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
 /* Flush all user entries from the segment table of the current processor. */
 void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long *offset = &__get_cpu_var(stab_cache_ptr);
+	unsigned long offset = __get_cpu_var(stab_cache_ptr);
 
-	if (*offset <= NR_STAB_CACHE_ENTRIES) {
+	if (offset <= NR_STAB_CACHE_ENTRIES) {
 		int i;
 		union {
 			unsigned long word0;
@@ -464,7 +466,7 @@ void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
 		} esid_data;
 
 		asm volatile("isync" : : : "memory");
-		for (i = 0; i < *offset; i++) {
+		for (i = 0; i < offset; i++) {
 			esid_data.word0 = 0;
 			esid_data.data.esid = __get_cpu_var(stab_cache[i]);
 			asm volatile("slbie %0" : : "r" (esid_data));
@@ -474,7 +476,7 @@ void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
 		asm volatile("isync; slbia; isync" : : : "memory");
 	}
 
-	*offset = 0;
+	__get_cpu_var(stab_cache_ptr) = 0;
 
 	preload_slb(tsk, mm);
 }
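
For illustration, here is a minimal userspace sketch of the cache-and-flush idiom the patch converges on. This is an assumption-laden model, not kernel code: ordinary globals stand in for the per-cpu variables stab_cache_ptr and stab_cache, printf stands in for the real segment/SLB entry invalidation, and the cache size is arbitrary.

/*
 * Userspace model of the post-patch per-cpu bookkeeping (illustrative only).
 */
#include <stdio.h>

#define NR_STAB_CACHE_ENTRIES 8	/* illustrative size, not the kernel's */

static unsigned long stab_cache_ptr;			/* models the per-cpu counter */
static unsigned char stab_cache[NR_STAB_CACHE_ENTRIES];	/* models the per-cpu cache */

/* Allocation side, mirroring the post-patch __ste_allocate() bookkeeping. */
static void cache_entry(unsigned char stab_entry)
{
	unsigned long offset = stab_cache_ptr;	/* copy the value, not its address */

	if (offset < NR_STAB_CACHE_ENTRIES)
		stab_cache[offset++] = stab_entry;
	else
		/* Saturate so the flush side can detect overflow. */
		offset = NR_STAB_CACHE_ENTRIES + 1;

	stab_cache_ptr = offset;	/* single write-back */
}

/* Flush side, mirroring the flush_stab()/flush_slb() decision. */
static void flush_cache(void)
{
	if (stab_cache_ptr <= NR_STAB_CACHE_ENTRIES) {
		/* Cache never overflowed: invalidate only what was recorded. */
		unsigned long i;

		for (i = 0; i < stab_cache_ptr; i++)
			printf("invalidate entry %u\n", (unsigned)stab_cache[i]);
	} else {
		/* Overflowed: fall back to invalidating everything. */
		printf("cache overflowed, flush all entries\n");
	}
	stab_cache_ptr = 0;
}

int main(void)
{
	int e;

	for (e = 0; e < 5; e++)
		cache_entry((unsigned char)e);
	flush_cache();	/* targeted: invalidates entries 0..4 */

	for (e = 0; e < 10; e++)
		cache_entry((unsigned char)e);
	flush_cache();	/* saturated past the limit: full flush */
	return 0;
}

Copying the counter by value keeps each function's traffic to the per-cpu area confined to one read and one write-back of stab_cache_ptr, rather than carrying a live pointer into the per-cpu area across the function body.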