Commit 0b317c38 authored by Paul Burton

MIPS: mm: Add set_cpu_context() for ASID assignments

When we gain MMID support we'll be storing MMIDs as atomic64_t values
and accessing them via atomic64_* functions. This means we can no
longer use cpu_context() as the left-hand side of an assignment, i.e.
as a modifiable lvalue. In preparation for this, introduce a new
set_cpu_context() function and replace all assignments with
cpu_context() on their left-hand side with equivalent calls to
set_cpu_context().

To enforce that cpu_context() cannot be used for assignments, rewrite
it from a macro into a static inline function, so that any remaining
misuse becomes a compile-time error.
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
parent 42d5b846
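For context, a sketch of where this is heading; none of the following is in the patch below, and it assumes mm->context.asid later becomes an array of atomic64_t as the commit message describes:

/* Hypothetical follow-up, for illustration only: once the per-CPU
 * context is an atomic64_t, the accessors wrap the atomic64_* helpers.
 * A direct assignment through cpu_context() could not be made atomic,
 * which is why the lvalue usage has to go first. */
static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
{
	return atomic64_read(&mm->context.asid[cpu]);
}

static inline void set_cpu_context(unsigned int cpu,
				   struct mm_struct *mm, u64 ctx)
{
	atomic64_set(&mm->context.asid[cpu], ctx);
}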
@@ -88,7 +88,17 @@ static inline u64 asid_first_version(unsigned int cpu)
 	return ~asid_version_mask(cpu) + 1;
 }
 
-#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
+static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
+{
+	return mm->context.asid[cpu];
+}
+
+static inline void set_cpu_context(unsigned int cpu,
+				   struct mm_struct *mm, u64 ctx)
+{
+	mm->context.asid[cpu] = ctx;
+}
+
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 #define cpu_asid(cpu, mm) \
 	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
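Making cpu_context() a static inline rather than a macro is what enforces the new rule at compile time: a function call yields an rvalue, so any assignment through cpu_context() that slips through review now fails to build. For illustration (not part of the patch):

/* Old macro: expanded to (mm)->context.asid[cpu] = 0 and compiled
 * silently, bypassing any accessor logic. */
cpu_context(cpu, mm) = 0;

/* New static inline: the call yields a plain u64 value, so the same
 * statement is rejected, e.g. by GCC with "lvalue required as left
 * operand of assignment".  Callers must write: */
set_cpu_context(cpu, mm, 0);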
@@ -111,7 +121,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	int i;
 
 	for_each_possible_cpu(i)
-		cpu_context(i, mm) = 0;
+		set_cpu_context(i, mm, 0);
 
 	mm->context.bd_emupage_allocmap = NULL;
 	spin_lock_init(&mm->context.bd_emupage_lock);

@@ -175,7 +185,7 @@ drop_mmu_context(struct mm_struct *mm)
 		htw_start();
 	} else {
 		/* will get a new context next time */
-		cpu_context(cpu, mm) = 0;
+		set_cpu_context(cpu, mm, 0);
 	}
 
 	local_irq_restore(flags);
...
@@ -537,7 +537,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		for_each_online_cpu(cpu) {
 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-				cpu_context(cpu, mm) = 0;
+				set_cpu_context(cpu, mm, 0);
 		}
 	}
 	drop_mmu_context(mm);

@@ -583,7 +583,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 			 * mm has been completely unused by that CPU.
 			 */
 			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
-				cpu_context(cpu, mm) = !exec;
+				set_cpu_context(cpu, mm, !exec);
 		}
 	}
 	local_flush_tlb_range(vma, start, end);

@@ -635,7 +635,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			 * by that CPU.
 			 */
 			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
-				cpu_context(cpu, vma->vm_mm) = 1;
+				set_cpu_context(cpu, vma->vm_mm, 1);
 		}
 	}
 	local_flush_tlb_page(vma, page);
...
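A note on the hunks above: the remote CPU's TLB is not flushed directly. Instead, its cpu_context() value is overwritten (with 0, 1, or !exec) so that the ASID-version check fails on the next context switch there and a fresh ASID is allocated. A simplified sketch of that check, reconstructed from the MIPS ASID-versioning scheme (the exact form in mmu_context.h may differ):

/* Sketch: on switch-in, the mm's stored context for this CPU must
 * carry the CPU's current ASID version; a poisoned value (0, 1, !exec)
 * never does, so a new ASID is generated and the stale TLB entries
 * become unreachable. */
if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
	get_new_mmu_context(mm);	/* stale or poisoned: new ASID */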
@@ -1019,7 +1019,7 @@ static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
 			get_new_mmu_context(kern_mm);
 			for_each_possible_cpu(i)
 				if (i != cpu)
-					cpu_context(i, kern_mm) = 0;
+					set_cpu_context(i, kern_mm, 0);
 			preempt_enable();
 		}
 		kvm_write_c0_guest_entryhi(cop0, entryhi);

@@ -1090,8 +1090,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
 			if (i == cpu)
 				continue;
 			if (user)
-				cpu_context(i, user_mm) = 0;
-			cpu_context(i, kern_mm) = 0;
+				set_cpu_context(i, user_mm, 0);
+			set_cpu_context(i, kern_mm, 0);
 		}
 
 		preempt_enable();
...
@@ -1098,8 +1098,8 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
 		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
 		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
 		for_each_possible_cpu(i) {
-			cpu_context(i, kern_mm) = 0;
-			cpu_context(i, user_mm) = 0;
+			set_cpu_context(i, kern_mm, 0);
+			set_cpu_context(i, user_mm, 0);
 		}
 
 		/* Generate new ASID for current mode */

@@ -1211,7 +1211,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
 		if (gasid != vcpu->arch.last_user_gasid) {
 			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
 			for_each_possible_cpu(i)
-				cpu_context(i, user_mm) = 0;
+				set_cpu_context(i, user_mm, 0);
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}
...
@@ -15,7 +15,8 @@ void get_new_mmu_context(struct mm_struct *mm)
 		local_flush_tlb_all();	/* start new asid cycle */
 	}
 
-	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+	set_cpu_context(cpu, mm, asid);
+	asid_cache(cpu) = asid;
 }
 
 void check_mmu_context(struct mm_struct *mm)
...
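The get_new_mmu_context() hunk is the one place where the conversion is not mechanical: the old code chained the assignment, which worked only because cpu_context() was an lvalue. set_cpu_context() returns void and cannot appear in a chain, so the store is split in two; a restatement for clarity:

/* Old: right-to-left chained assignment, i.e.
 *	asid_cache(cpu) = asid;  then  cpu_context(cpu, mm) = asid; */
cpu_context(cpu, mm) = asid_cache(cpu) = asid;

/* New: two separate stores with the same net effect. */
set_cpu_context(cpu, mm, asid);
asid_cache(cpu) = asid;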