Commit ec22ca5e authored by Tejun Heo

workqueue: move global_cwq->cpu to worker_pool

Move gcwq->cpu to pool->cpu.  This introduces a couple of places where
gcwq->pools[0].cpu is used.  These will soon go away as gcwq is
further reduced.

This is part of an effort to remove global_cwq and make worker_pool
the top-level abstraction, which in turn will help implement worker
pools with user-specified attributes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent c9e7cf27
include/trace/events/workqueue.h

@@ -54,7 +54,7 @@ TRACE_EVENT(workqueue_queue_work,
 		__entry->function	= work->func;
 		__entry->workqueue	= cwq->wq;
 		__entry->req_cpu	= req_cpu;
-		__entry->cpu		= cwq->pool->gcwq->cpu;
+		__entry->cpu		= cwq->pool->cpu;
 	),
 
 	TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
...

kernel/workqueue.c

@@ -124,6 +124,7 @@ enum {
 struct worker_pool {
 	struct global_cwq	*gcwq;		/* I: the owning gcwq */
+	unsigned int		cpu;		/* I: the associated cpu */
 	int			id;		/* I: pool ID */
 	unsigned int		flags;		/* X: flags */
@@ -152,7 +153,6 @@ struct worker_pool {
  */
 struct global_cwq {
 	spinlock_t		lock;		/* the gcwq lock */
-	unsigned int		cpu;		/* I: the associated cpu */
 
 	struct worker_pool	pools[NR_STD_WORKER_POOLS];
 						/* normal and highpri pools */
@@ -489,7 +489,7 @@ static struct worker_pool *worker_pool_by_id(int pool_id)
 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 {
-	int cpu = pool->gcwq->cpu;
+	int cpu = pool->cpu;
 	int idx = std_worker_pool_pri(pool);
 
 	if (cpu != WORK_CPU_UNBOUND)
@@ -764,7 +764,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 	struct worker *worker = kthread_data(task);
 
 	if (!(worker->flags & WORKER_NOT_RUNNING)) {
-		WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
+		WARN_ON_ONCE(worker->pool->cpu != cpu);
 		atomic_inc(get_pool_nr_running(worker->pool));
 	}
 }
@@ -1278,7 +1278,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	}
 
 	/* gcwq determined, get cwq and queue */
-	cwq = get_cwq(gcwq->cpu, wq);
+	cwq = get_cwq(gcwq->pools[0].cpu, wq);
 	trace_workqueue_queue_work(req_cpu, cwq, work);
 
 	if (WARN_ON(!list_empty(&work->entry))) {
@@ -1385,20 +1385,20 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 	/*
 	 * This stores cwq for the moment, for the timer_fn.  Note that the
-	 * work's gcwq is preserved to allow reentrance detection for
+	 * work's pool is preserved to allow reentrance detection for
 	 * delayed works.
 	 */
 	if (!(wq->flags & WQ_UNBOUND)) {
-		struct global_cwq *gcwq = get_work_gcwq(work);
+		struct worker_pool *pool = get_work_pool(work);
 
 		/*
-		 * If we cannot get the last gcwq from @work directly,
+		 * If we cannot get the last pool from @work directly,
 		 * select the last CPU such that it avoids unnecessarily
 		 * triggering non-reentrancy check in __queue_work().
 		 */
 		lcpu = cpu;
-		if (gcwq)
-			lcpu = gcwq->cpu;
+		if (pool)
+			lcpu = pool->cpu;
 		if (lcpu == WORK_CPU_UNBOUND)
 			lcpu = raw_smp_processor_id();
 	} else {
@@ -1619,14 +1619,14 @@ __acquires(&gcwq->lock)
 		 * against POOL_DISASSOCIATED.
 		 */
 		if (!(pool->flags & POOL_DISASSOCIATED))
-			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+			set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
 
 		spin_lock_irq(&gcwq->lock);
 		if (pool->flags & POOL_DISASSOCIATED)
 			return false;
-		if (task_cpu(task) == gcwq->cpu &&
+		if (task_cpu(task) == pool->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
-				  get_cpu_mask(gcwq->cpu)))
+				  get_cpu_mask(pool->cpu)))
 			return true;
 		spin_unlock_irq(&gcwq->lock);
@@ -1747,7 +1747,7 @@ static void rebind_workers(struct global_cwq *gcwq)
 		else
 			wq = system_wq;
 
-		insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
+		insert_work(get_cwq(pool->cpu, wq), rebind_work,
 			    worker->scheduled.next,
 			    work_color_to_flags(WORK_NO_COLOR));
 	}
@@ -1806,10 +1806,10 @@ static struct worker *create_worker(struct worker_pool *pool)
 	worker->pool = pool;
 	worker->id = id;
 
-	if (gcwq->cpu != WORK_CPU_UNBOUND)
+	if (pool->cpu != WORK_CPU_UNBOUND)
 		worker->task = kthread_create_on_node(worker_thread,
-					worker, cpu_to_node(gcwq->cpu),
-					"kworker/%u:%d%s", gcwq->cpu, id, pri);
+					worker, cpu_to_node(pool->cpu),
+					"kworker/%u:%d%s", pool->cpu, id, pri);
 	else
 		worker->task = kthread_create(worker_thread, worker,
 					      "kworker/u:%d%s", id, pri);
@@ -1829,7 +1829,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	 * online, make sure every worker has %PF_THREAD_BOUND set.
 	 */
 	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		kthread_bind(worker->task, gcwq->cpu);
+		kthread_bind(worker->task, pool->cpu);
 	} else {
 		worker->task->flags |= PF_THREAD_BOUND;
 		worker->flags |= WORKER_UNBOUND;
@@ -1936,7 +1936,7 @@ static bool send_mayday(struct work_struct *work)
 		return false;
 
 	/* mayday mayday mayday */
-	cpu = cwq->pool->gcwq->cpu;
+	cpu = cwq->pool->cpu;
 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
 	if (cpu == WORK_CPU_UNBOUND)
 		cpu = 0;
@@ -2193,7 +2193,7 @@ __acquires(&gcwq->lock)
 	 */
 	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
		     !(pool->flags & POOL_DISASSOCIATED) &&
-		     raw_smp_processor_id() != gcwq->cpu);
+		     raw_smp_processor_id() != pool->cpu);
 
 	/*
 	 * A single work shouldn't be executed concurrently by
@@ -3553,7 +3553,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 	struct hlist_node *pos;
 	int i;
 
-	BUG_ON(gcwq->cpu != smp_processor_id());
+	BUG_ON(gcwq->pools[0].cpu != smp_processor_id());
 
 	gcwq_claim_assoc_and_lock(gcwq);
@@ -3860,10 +3860,10 @@ static int __init init_workqueues(void)
 		struct worker_pool *pool;
 
 		spin_lock_init(&gcwq->lock);
-		gcwq->cpu = cpu;
 
 		for_each_worker_pool(pool, gcwq) {
 			pool->gcwq = gcwq;
+			pool->cpu = cpu;
 			pool->flags |= POOL_DISASSOCIATED;
 			INIT_LIST_HEAD(&pool->worklist);
 			INIT_LIST_HEAD(&pool->idle_list);
...