Commit 020b37ac authored by Rusty Russell, committed by Ingo Molnar

x86: Fix up obsolete __cpu_set() function usage

Thanks to spatch, plus manual removal of "&*".
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1425296150-4722-8-git-send-email-rusty@rustcorp.com.au
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a38ecbbd
...@@ -135,12 +135,12 @@ static void init_x2apic_ldr(void) ...@@ -135,12 +135,12 @@ static void init_x2apic_ldr(void)
per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR); per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);
__cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu)); cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
continue; continue;
__cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu)); cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
__cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu)); cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
} }
} }
...@@ -195,7 +195,7 @@ static int x2apic_init_cpu_notifier(void) ...@@ -195,7 +195,7 @@ static int x2apic_init_cpu_notifier(void)
BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu)); BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu)); cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
register_hotcpu_notifier(&x2apic_cpu_notifier); register_hotcpu_notifier(&x2apic_cpu_notifier);
return 1; return 1;
} }
......
...@@ -295,7 +295,7 @@ int check_irq_vectors_for_cpu_disable(void) ...@@ -295,7 +295,7 @@ int check_irq_vectors_for_cpu_disable(void)
this_cpu = smp_processor_id(); this_cpu = smp_processor_id();
cpumask_copy(&online_new, cpu_online_mask); cpumask_copy(&online_new, cpu_online_mask);
cpu_clear(this_cpu, online_new); cpumask_clear_cpu(this_cpu, &online_new);
this_count = 0; this_count = 0;
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
...@@ -307,7 +307,7 @@ int check_irq_vectors_for_cpu_disable(void) ...@@ -307,7 +307,7 @@ int check_irq_vectors_for_cpu_disable(void)
data = irq_desc_get_irq_data(desc); data = irq_desc_get_irq_data(desc);
cpumask_copy(&affinity_new, data->affinity); cpumask_copy(&affinity_new, data->affinity);
cpu_clear(this_cpu, affinity_new); cpumask_clear_cpu(this_cpu, &affinity_new);
/* Do not count inactive or per-cpu irqs. */ /* Do not count inactive or per-cpu irqs. */
if (!irq_has_action(irq) || irqd_is_per_cpu(data)) if (!irq_has_action(irq) || irqd_is_per_cpu(data))
......
...@@ -415,7 +415,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) ...@@ -415,7 +415,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
struct reset_args reset_args; struct reset_args reset_args;
reset_args.sender = sender; reset_args.sender = sender;
cpus_clear(*mask); cpumask_clear(mask);
/* find a single cpu for each uvhub in this distribution mask */ /* find a single cpu for each uvhub in this distribution mask */
maskbits = sizeof(struct pnmask) * BITSPERBYTE; maskbits = sizeof(struct pnmask) * BITSPERBYTE;
/* each bit is a pnode relative to the partition base pnode */ /* each bit is a pnode relative to the partition base pnode */
...@@ -425,7 +425,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) ...@@ -425,7 +425,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
continue; continue;
apnode = pnode + bcp->partition_base_pnode; apnode = pnode + bcp->partition_base_pnode;
cpu = pnode_to_first_cpu(apnode, smaster); cpu = pnode_to_first_cpu(apnode, smaster);
cpu_set(cpu, *mask); cpumask_set_cpu(cpu, mask);
} }
/* IPI all cpus; preemption is already disabled */ /* IPI all cpus; preemption is already disabled */
...@@ -1126,7 +1126,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, ...@@ -1126,7 +1126,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
/* don't actually do a shootdown of the local cpu */ /* don't actually do a shootdown of the local cpu */
cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
if (cpu_isset(cpu, *cpumask)) if (cpumask_test_cpu(cpu, cpumask))
stat->s_ntargself++; stat->s_ntargself++;
bau_desc = bcp->descriptor_base; bau_desc = bcp->descriptor_base;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.