Commit 5d8c39f6 authored by Rusty Russell

cpumask: use mm_cpumask() wrapper: ia64

Makes code futureproof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent 2af51a3f
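For reference, a rough sketch (not part of the commit itself) of the conversion pattern the hunks below apply. It assumes the usual definition of the accessor at the time, roughly a macro returning a pointer to the member, so callers stop reaching into mm->cpu_vm_mask directly and the older by-value cpus_* operations become the pointer-taking cpumask_* ones:

	/* Sketch only: assumed definition of the future-safe accessor */
	#define mm_cpumask(mm)	(&(mm)->cpu_vm_mask)

	/* old, by-value ops on the member        ->  new, pointer-taking ops on mm_cpumask(mm) */
	cpus_clear(mm->cpu_vm_mask);              ->  cpumask_clear(mm_cpumask(mm));
	cpu_isset(cpu, mm->cpu_vm_mask)           ->  cpumask_test_cpu(cpu, mm_cpumask(mm))
	cpu_set(cpu, mm->cpu_vm_mask);            ->  cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpus_weight(mm->cpu_vm_mask)              ->  cpumask_weight(mm_cpumask(mm))
	for_each_cpu_mask(cpu, mm->cpu_vm_mask)   ->  for_each_cpu(cpu, mm_cpumask(mm))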
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
 	/* re-check, now that we've got the lock: */
 	context = mm->context;
 	if (context == 0) {
-		cpus_clear(mm->cpu_vm_mask);
+		cpumask_clear(mm_cpumask(mm));
 		if (ia64_ctx.next >= ia64_ctx.limit) {
 			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
 				ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)
 	do {
 		context = get_mmu_context(mm);
-		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		reload_context(context);
 		/*
 		 * in the unlikely event of a TLB-flush by another thread,
...
@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
 	preempt_disable();
 #ifdef CONFIG_SMP
-	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
 		platform_global_tlb_purge(mm, start, end, nbits);
 		preempt_enable();
 		return;
...
@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
 	unsigned long itc;
 	itc = ia64_get_itc();
-	smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+	smp_flush_tlb_cpumask(*mm_cpumask(mm));
 	itc = ia64_get_itc() - itc;
 	__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
 	__get_cpu_var(ptcstats).shub_ipi_flushes++;
...
@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 	nodes_clear(nodes_flushed);
 	i = 0;
-	for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+	for_each_cpu(cpu, mm_cpumask(mm)) {
 		cnode = cpu_to_node(cpu);
 		node_set(cnode, nodes_flushed);
 		lcpu = cpu;
...
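One conversion above differs slightly from the rest: smp_flush_tlb_cpumask() still takes the mask by value rather than as a pointer, so the updated call dereferences the accessor instead of passing it directly. A minimal sketch of that pattern, assuming the accessor definition sketched earlier:

	smp_flush_tlb_cpumask(mm->cpu_vm_mask);		/* old: member passed by value */
	smp_flush_tlb_cpumask(*mm_cpumask(mm));		/* new: dereference the pointer from mm_cpumask() */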