Commit 78f1c4d6 authored by Rusty Russell

cpumask: use mm_cpumask() wrapper: x86

Makes the code future-proof against the impending change to mm->cpu_vm_mask (which is about to become a pointer).

It's also a chance to use the new cpumask_* ops, which take a pointer
(the older ones are deprecated, but there's no hurry for arch code); a
minimal sketch of the wrapper follows the commit header.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent fa40699b
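
For orientation: the wrapper being adopted is just an accessor for the existing field. A minimal sketch, assuming the definition Rusty's series adds in the core mm headers (include/linux/mm_types.h at the time); treat the exact location and spelling as an assumption, not part of this patch:

/* Future-safe accessor for struct mm_struct's cpu_vm_mask: callers always
 * get a struct cpumask *, so when cpu_vm_mask later becomes a pointer only
 * this one definition has to change.  (Assumed sketch, not verbatim kernel
 * source.)
 */
#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)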
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
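
As an aside (not part of the patch), a toy sketch of the pointer-taking ops the switch_mm() hunks above move to; "mm" is any struct mm_struct *, and each op works on the struct cpumask * that mm_cpumask() returns, whether cpu_vm_mask is embedded today or a pointer later:

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/types.h>

/* cpu starts using this mm: set its bit in the mm's CPU mask. */
static void mark_cpu_in_mm(struct mm_struct *mm, unsigned int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(mm));
}

/* cpu stops using this mm: clear its bit again. */
static void unmark_cpu_in_mm(struct mm_struct *mm, unsigned int cpu)
{
	cpumask_clear_cpu(cpu, mm_cpumask(mm));
}

/* Atomically set the bit and report whether it was already set, which is
 * how switch_mm() detects "we were in lazy TLB mode" above. */
static bool cpu_was_already_in_mm(struct mm_struct *mm, unsigned int cpu)
{
	return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
}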
...
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #ifdef CONFIG_SMP
 	preempt_disable();
 	load_LDT(pc);
-	if (!cpus_equal(current->mm->cpu_vm_mask,
-			cpumask_of_cpu(smp_processor_id())))
+	if (!cpumask_equal(mm_cpumask(current->mm),
+			   cpumask_of(smp_processor_id())))
 		smp_call_function(flush_ldt, current->mm, 1);
 	preempt_enable();
 #else
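
Another aside, a minimal sketch (hypothetical helper, not in the patch) of the idiom alloc_ldt() uses above: cpumask_of(cpu) yields a const struct cpumask * containing only that CPU, so comparing it against mm_cpumask() asks "is this CPU the mm's only user?":

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/types.h>

/* True if the running CPU is the sole user of mm.  The caller must have
 * preemption disabled so smp_processor_id() is stable, as alloc_ldt()
 * ensures with preempt_disable(). */
static bool mm_only_used_here(struct mm_struct *mm)
{
	return cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}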
...
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
 {
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+	cpumask_clear_cpu(cpu,
+			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
 	preempt_disable();
 	local_flush_tlb();
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
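
For reference, a sketch (hypothetical helper, not in the patch) of the "any other CPU?" test the TLB-flush paths above rely on: cpumask_any_but() picks some CPU in the mask other than the given one and returns a value >= nr_cpu_ids when no such CPU exists:

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/types.h>

/* True if any CPU besides the running one has this mm loaded, i.e. a
 * cross-CPU flush IPI is actually needed.  Call with preemption disabled. */
static bool mm_used_elsewhere(struct mm_struct *mm)
{
	return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids;
}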
@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 			leave_mm(smp_processor_id());
 	}
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, va);
 	preempt_enable();
 }
...
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	/* Get the "official" set of cpus referring to our pagetable. */
 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
 		for_each_online_cpu(cpu) {
-			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
 		}
 		return;
 	}
-	cpumask_copy(mask, &mm->cpu_vm_mask);
+	cpumask_copy(mask, mm_cpumask(mm));
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed
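
Finally, a sketch (hypothetical helper, not in the patch) of the cpumask_var_t pattern xen_drop_mm_ref() builds on: with CONFIG_CPUMASK_OFFSTACK the variable is heap-allocated, so the allocation can fail and callers need a fallback, which is why the hunk above walks the online CPUs by hand when alloc_cpumask_var() returns false:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>

static void snapshot_mm_users(struct mm_struct *mm)
{
	cpumask_var_t mask;

	/* Without CONFIG_CPUMASK_OFFSTACK this is on-stack and never fails. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return;			/* caller would take a slower fallback */

	cpumask_copy(mask, mm_cpumask(mm));	/* private snapshot of the users */
	/* ... act on "mask" without racing against later mm_cpumask() updates ... */
	free_cpumask_var(mask);
}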
...