Commit 7ee681b2 authored by Thomas Gleixner, committed by Ingo Molnar

workqueue: Convert to state machine callbacks

Get rid of the prio ordering of the separate notifiers and use a proper state
callback pair.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153335.197083890@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c6a84daa
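The conversion relies on the hotplug core invoking .startup callbacks in ascending state order during bringup and .teardown callbacks in descending order during offline, so ordering falls out of a state's position in enum cpuhp_state instead of notifier priorities. Below is a minimal standalone C model of that mechanism (illustrative names only, not kernel code):

/*
 * Minimal model of the ordering rule that replaces notifier priorities:
 * startup callbacks run in ascending state order on bringup, teardown
 * callbacks in descending order on offline.  Placing the workqueue state
 * ahead of the notifier state yields "up before, down after" with no
 * +5/-5 priority juggling.
 */
#include <stdio.h>

enum hp_state { STATE_WORKQUEUE, STATE_NOTIFIERS, STATE_MAX };

struct step {
        const char *name;
        int (*startup)(unsigned int cpu);
        int (*teardown)(unsigned int cpu);
};

static int wq_up(unsigned int cpu)   { printf("workqueue up,   cpu%u\n", cpu); return 0; }
static int wq_down(unsigned int cpu) { printf("workqueue down, cpu%u\n", cpu); return 0; }
static int nb_up(unsigned int cpu)   { printf("notifiers up,   cpu%u\n", cpu); return 0; }
static int nb_down(unsigned int cpu) { printf("notifiers down, cpu%u\n", cpu); return 0; }

static struct step steps[STATE_MAX] = {
        [STATE_WORKQUEUE] = { "workqueue", wq_up, wq_down },
        [STATE_NOTIFIERS] = { "notifiers", nb_up, nb_down },
};

static void model_cpu_up(unsigned int cpu)
{
        int st;

        for (st = 0; st < STATE_MAX; st++)      /* ascending */
                if (steps[st].startup)
                        steps[st].startup(cpu);
}

static void model_cpu_down(unsigned int cpu)
{
        int st;

        for (st = STATE_MAX - 1; st >= 0; st--) /* descending */
                if (steps[st].teardown)
                        steps[st].teardown(cpu);
}

int main(void)
{
        model_cpu_up(1);        /* workqueue before notifiers */
        model_cpu_down(1);      /* notifiers before workqueue */
        return 0;
}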
include/linux/cpu.h
@@ -55,15 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 
 struct notifier_block;
 
-/*
- * CPU notifier priorities.
- */
-enum {
-	/* bring up workqueues before normal notifiers and down after */
-	CPU_PRI_WORKQUEUE_UP	= 5,
-	CPU_PRI_WORKQUEUE_DOWN	= -5,
-};
-
 #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
 #define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
 #define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
include/linux/cpuhotplug.h
@@ -12,6 +12,7 @@ enum cpuhp_state {
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
 	CPUHP_PERF_SUPERH,
+	CPUHP_WORKQUEUE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
@@ -49,6 +50,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_S390_SF_ONLINE,
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,
 	CPUHP_AP_PERF_ARM_CCN_ONLINE,
+	CPUHP_AP_WORKQUEUE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
include/linux/workqueue.h
@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu);
 static inline void wq_watchdog_touch(int cpu) { }
 #endif	/* CONFIG_WQ_WATCHDOG */
 
+#ifdef CONFIG_SMP
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+#endif /* CONFIG_SMP */
+
 #endif
kernel/cpu.c
@@ -1185,6 +1185,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.startup		= perf_event_init_cpu,
 		.teardown		= perf_event_exit_cpu,
 	},
+	[CPUHP_WORKQUEUE_PREP] = {
+		.name			= "workqueue prepare",
+		.startup		= workqueue_prepare_cpu,
+		.teardown		= NULL,
+	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
 	 * are converted to states.
@@ -1267,6 +1272,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup		= perf_event_init_cpu,
 		.teardown		= perf_event_exit_cpu,
 	},
+	[CPUHP_AP_WORKQUEUE_ONLINE] = {
+		.name			= "workqueue online",
+		.startup		= workqueue_online_cpu,
+		.teardown		= workqueue_offline_cpu,
+	},
 	/*
 	 * Online/down_prepare notifiers. Will be removed once the notifiers
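Note that the CPUHP_WORKQUEUE_PREP entry registers only a startup callback: a NULL .teardown means the hotplug core has nothing to call for this state on the down path. A minimal sketch of that guard, assuming a cpuhp_step-like structure (illustrative, not the exact kernel/cpu.c code):

/* Illustrative sketch of how the hotplug core treats a NULL callback. */
static int invoke_step(unsigned int cpu, struct cpuhp_step *step, bool bringup)
{
        int (*cb)(unsigned int cpu) = bringup ? step->startup : step->teardown;

        if (!cb)
                return 0;       /* nothing to do in this direction */
        return cb(cpu);
}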
kernel/workqueue.c
@@ -4611,84 +4611,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 					pool->attrs->cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq_pool_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			mutex_unlock(&pool->attach_mutex);
-		}
+		mutex_unlock(&pool->attach_mutex);
+	}
 
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
 
-		mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
-
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
+
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
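The conversion also changes the error convention: workqueue_prepare_cpu() now returns a plain -ENOMEM instead of NOTIFY_BAD, so the hotplug core can propagate the error to the caller and unwind the states that already completed. A rough sketch of that rollback, reusing invoke_step() from the sketch above (illustrative, not the actual kernel/cpu.c loop):

/*
 * Illustrative: bringup walks the state table upward; if a startup
 * callback fails, the states that already succeeded are undone in
 * reverse order and the error is returned to the caller.
 */
static int bringup_to_target(unsigned int cpu, int target)
{
        int st, ret;

        for (st = 0; st <= target; st++) {
                ret = invoke_step(cpu, &cpuhp_bp_states[st], true);
                if (ret) {
                        /* unwind the states that already succeeded */
                        while (--st >= 0)
                                invoke_step(cpu, &cpuhp_bp_states[st], false);
                        return ret;
                }
        }
        return 0;
}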
@@ -5490,9 +5471,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
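With the callbacks wired into the static state tables, init_workqueues() no longer registers anything at runtime. For comparison, subsystems that do not need a fixed slot in the table can register dynamically with cpuhp_setup_state(), an interface from the same hotplug rework. A hedged example with a hypothetical driver (the mydrv names are made up for illustration):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int mydrv_online(unsigned int cpu)
{
        /* per-CPU setup for this hypothetical driver */
        return 0;
}

static int mydrv_offline(unsigned int cpu)
{
        /* per-CPU teardown */
        return 0;
}

static int __init mydrv_init(void)
{
        int ret;

        /*
         * Request a dynamic state: the core picks a free slot in the
         * CPUHP_AP_ONLINE_DYN range and invokes mydrv_online() on all
         * currently online CPUs before returning.  A positive return
         * value is the allocated state number.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                mydrv_online, mydrv_offline);
        return ret < 0 ? ret : 0;
}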