Commit ab67f600 authored by Linus Torvalds

Merge tag 'smp-core-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core SMP updates from Thomas Gleixner:
 "A small set of SMP core code changes:

   - Rework the smp function call core code to avoid the allocation of
     an additional cpumask

   - Remove the no longer required GFP argument from on_each_cpu_cond()
     and on_each_cpu_cond_mask() and fix up the callers"

* tag 'smp-core-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Remove allocation mask from on_each_cpu_cond.*()
  smp: Add a smp_cond_func_t argument to smp_call_function_many()
  smp: Use smp_cond_func_t as type for the conditional function
parents e279160f cb923159
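
The diff below is easiest to read with the caller-visible change in mind: the gfp_t parameter disappears because on_each_cpu_cond() and on_each_cpu_cond_mask() no longer allocate a temporary cpumask internally. A minimal before/after sketch of a hypothetical caller (cpu_needs_work(), do_work(), and trigger_work() are illustrative names, not functions from this commit):

        #include <linux/smp.h>

        /* Runs on the calling CPU for each candidate CPU; must not sleep. */
        static bool cpu_needs_work(int cpu, void *info)
        {
                return true;    /* illustrative: select every CPU */
        }

        /* Runs on each selected CPU, in IPI context. */
        static void do_work(void *info)
        {
        }

        static void trigger_work(void *info)
        {
                /* Before this series (GFP flags for the internal cpumask):
                 *      on_each_cpu_cond(cpu_needs_work, do_work, info, 1, GFP_ATOMIC);
                 * After this series (no allocation, hence no GFP argument):
                 */
                on_each_cpu_cond(cpu_needs_work, do_work, info, 1);
        }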
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
                               (void *)info, 1);
        else
                on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-                               (void *)info, 1, GFP_ATOMIC, cpumask);
+                               (void *)info, 1, cpumask);
 }

 /*
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 void invalidate_bh_lrus(void)
 {
-       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+       on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -15,6 +15,7 @@
 #include <linux/llist.h>

 typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
 struct __call_single_data {
        struct llist_node llist;
        smp_call_func_t func;
@@ -49,13 +50,11 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * cond_func returns a positive value. This may include the local
  * processor.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-               smp_call_func_t func, void *info, bool wait,
-               gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+                     void *info, bool wait);

-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-               smp_call_func_t func, void *info, bool wait,
-               gfp_t gfp_flags, const struct cpumask *mask);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+                          void *info, bool wait, const struct cpumask *mask);

 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -395,22 +395,9 @@ int smp_call_function_any(const struct cpumask *mask,
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);

-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-                           smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+                                       smp_call_func_t func, void *info,
+                                       bool wait, smp_cond_func_t cond_func)
 {
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
-               smp_call_function_single(cpu, func, info, wait);
+               if (!cond_func || (cond_func && cond_func(cpu, info)))
+                       smp_call_function_single(cpu, func, info, wait);
                return;
        }
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

+               if (cond_func && !cond_func(cpu, info))
+                       continue;
+
                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
        }
 }

+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+                           smp_call_func_t func, void *info, bool wait)
+{
+       smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);

 /**
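The key property of smp_call_function_many_cond() above: cond_func is evaluated on the calling CPU, once per candidate target, before any IPI is queued, and a NULL cond_func selects every CPU in the mask. A standalone userspace model of just that selection step (plain C, illustrative only; not kernel code):

        #include <stdbool.h>
        #include <stdio.h>

        typedef bool (*smp_cond_func_t)(int cpu, void *info);

        /* Models the filtering in smp_call_function_many_cond():
         * a NULL cond_func means "every CPU in the mask". */
        static void select_targets(const bool *mask, int nr_cpus,
                                   smp_cond_func_t cond_func, void *info)
        {
                for (int cpu = 0; cpu < nr_cpus; cpu++) {
                        if (!mask[cpu])
                                continue;
                        if (cond_func && !cond_func(cpu, info))
                                continue;       /* condition rejected this CPU */
                        printf("would IPI cpu %d\n", cpu);
                }
        }

        static bool is_odd(int cpu, void *info)
        {
                return cpu & 1;
        }

        int main(void)
        {
                bool mask[4] = { true, true, true, true };

                select_targets(mask, 4, is_odd, NULL); /* CPUs 1 and 3 */
                select_targets(mask, 4, NULL, NULL);   /* all four CPUs */
                return 0;
        }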
@@ -668,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:      An arbitrary pointer to pass to both functions.
  * @wait:      If true, wait (atomically) until function has
  *             completed on other CPUs.
- * @gfp_flags: GFP flags to use when allocating the cpumask
- *             used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
@@ -680,46 +686,27 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-                       smp_call_func_t func, void *info, bool wait,
-                       gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+                          void *info, bool wait, const struct cpumask *mask)
 {
-       cpumask_var_t cpus;
-       int cpu, ret;
+       int cpu = get_cpu();

-       might_sleep_if(gfpflags_allow_blocking(gfp_flags));
+       smp_call_function_many_cond(mask, func, info, wait, cond_func);
+       if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+               unsigned long flags;

-       if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-               preempt_disable();
-               for_each_cpu(cpu, mask)
-                       if (cond_func(cpu, info))
-                               __cpumask_set_cpu(cpu, cpus);
-               on_each_cpu_mask(cpus, func, info, wait);
-               preempt_enable();
-               free_cpumask_var(cpus);
-       } else {
-               /*
-                * No free cpumask, bother. No matter, we'll
-                * just have to IPI them one by one.
-                */
-               preempt_disable();
-               for_each_cpu(cpu, mask)
-                       if (cond_func(cpu, info)) {
-                               ret = smp_call_function_single(cpu, func,
-                                                              info, wait);
-                               WARN_ON_ONCE(ret);
-                       }
-               preempt_enable();
-       }
+               local_irq_save(flags);
+               func(info);
+               local_irq_restore(flags);
+       }
+       put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);

-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-                       smp_call_func_t func, void *info, bool wait,
-                       gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+                     void *info, bool wait)
 {
-       on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-                               cpu_online_mask);
+       on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
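A consequence of dropping the allocation that is easy to miss: the rewritten on_each_cpu_cond_mask() runs entirely between get_cpu() and put_cpu(), i.e. with preemption disabled, and never sleeps, so cond_func itself must be non-blocking. The callers touched by this series (tlb_is_not_lazy, has_bh_in_lru, has_cpu_slab) each amount to a cheap read of per-CPU state; a sketch of that shape (work_pending is an illustrative per-CPU variable, not one from this commit):

        #include <linux/percpu.h>
        #include <linux/smp.h>

        /* Illustrative per-CPU flag. */
        static DEFINE_PER_CPU(bool, work_pending);

        /* Safe as an smp_cond_func_t: a plain per-CPU read, no locks,
         * no sleeping; called with preemption disabled on the caller. */
        static bool cpu_has_pending_work(int cpu, void *info)
        {
                return per_cpu(work_pending, cpu);
        }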
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -68,9 +68,8 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * Preemption is disabled here to make sure the cond_func is called under the
  * same condtions in UP and SMP.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-                          smp_call_func_t func, void *info, bool wait,
-                          gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+                          void *info, bool wait, const struct cpumask *mask)
 {
        unsigned long flags;
@@ -84,11 +83,10 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);

-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-                     smp_call_func_t func, void *info, bool wait,
-                     gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+                     void *info, bool wait)
 {
-       on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+       on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 static void flush_all(struct kmem_cache *s)
 {
-       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }

 /*