Commit dde3626f authored by Nadav Amit, committed by Thomas Gleixner

x86/apic: Use non-atomic operations when possible

Using __clear_bit() and __cpumask_clear_cpu() is more efficient than using
their atomic counterparts.

Use them when atomicity is not needed, such as when manipulating bitmasks
that are on the stack.
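
[Editor's note: for context, a minimal user-space sketch of the distinction this patch exploits. On x86 the atomic bit helpers compile to lock-prefixed read-modify-write instructions, while the double-underscore variants are plain load/modify/store sequences; when the bitmask is private to the current context (e.g. a copy on the stack), nothing else can observe it and the lock prefix is pure overhead. The helper names below are invented for illustration and are not the kernel API.]

#include <limits.h>

/* Atomic clear: a lock-prefixed RMW on x86 (analogous to the kernel's
 * clear_bit()), serialized against all other CPUs. */
static inline void atomic_clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_SEQ_CST);
}

/* Non-atomic clear: a plain load/AND/store (analogous to __clear_bit()),
 * correct only when nothing else can access *addr concurrently. */
static inline void nonatomic_clear_bit(int nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

/* Mirrors the flat_send_IPI_mask_allbutself() pattern below: 'mask' is
 * a stack-local copy, so the cheap non-atomic variant is sufficient. */
static void send_ipi_allbutself(unsigned long online_mask, int cpu)
{
	unsigned long mask = online_mask;	/* private to this stack frame */

	if (cpu < (int)(sizeof(mask) * CHAR_BIT))
		nonatomic_clear_bit(cpu, &mask);

	/* ... 'mask' would be handed to the IPI-sending primitive here ... */
	(void)mask;
}

[The cpumask hunks follow the same reasoning: tmpmsk and allbutself are scratch masks private to the sending context, so no other CPU can race on them.]
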
Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20190613064813.8102-10-namit@vmware.com
parent 748b170c
@@ -78,7 +78,7 @@ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 	int cpu = smp_processor_id();
 
 	if (cpu < BITS_PER_LONG)
-		clear_bit(cpu, &mask);
+		__clear_bit(cpu, &mask);
 
 	_flat_send_IPI_mask(mask, vector);
 }
@@ -92,7 +92,7 @@ static void flat_send_IPI_allbutself(int vector)
 	unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
 	if (cpu < BITS_PER_LONG)
-		clear_bit(cpu, &mask);
+		__clear_bit(cpu, &mask);
 
 	_flat_send_IPI_mask(mask, vector);
 }
@@ -50,7 +50,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 	cpumask_copy(tmpmsk, mask);
 	/* If IPI should not be sent to self, clear current CPU */
 	if (apic_dest != APIC_DEST_ALLINC)
-		cpumask_clear_cpu(smp_processor_id(), tmpmsk);
+		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);
 
 	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
 	for_each_cpu(cpu, tmpmsk) {
@@ -146,7 +146,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 	}
 
 	cpumask_copy(allbutself, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), allbutself);
+	__cpumask_clear_cpu(smp_processor_id(), allbutself);
 
 	if (cpumask_equal(mask, allbutself) &&
 	    cpumask_equal(cpu_online_mask, cpu_callout_mask))