Commit a9b8a985 authored by Sebastian Andrzej Siewior, committed by Tejun Heo

workqueue: Convert the pool::lock and wq_mayday_lock to raw_spinlock_t

The workqueue code has its internal spinlocks (pool::lock), which
are acquired on most workqueue operations. These spinlocks are
converted to 'sleeping' spinlocks on an RT kernel.

Workqueue functions can be invoked from contexts which are truly atomic
even on a PREEMPT_RT-enabled kernel. Taking sleeping locks from such
contexts is forbidden.

The pool::lock hold times are bounded and the code sections are
relatively short, which allows converting pool::lock and, as a
consequence, wq_mayday_lock to raw spinlocks that remain truly
spinning locks even on a PREEMPT_RT kernel.

With the previous conversion of the manager waitqueue to a simple
waitqueue, workqueues are now fully RT compliant.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent d8bb65ab
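For context, the core of the change in the diff below is mechanical: every spinlock_t operation on pool::lock and wq_mayday_lock is replaced by its raw_spin* counterpart. The following sketch is a minimal illustration of that pattern and is not part of the patch; example_pool and its helpers are made up for this example and merely stand in for struct worker_pool. On PREEMPT_RT a spinlock_t becomes a sleeping rtmutex-based lock, while a raw_spinlock_t keeps spinning with interrupts disabled, which is why it may be taken from truly atomic contexts as long as hold times stay short.

#include <linux/spinlock.h>

struct example_pool {                           /* hypothetical; stands in for struct worker_pool */
	raw_spinlock_t lock;                    /* was: spinlock_t lock; */
	int nr_items;
};

static void example_pool_init(struct example_pool *pool)
{
	raw_spin_lock_init(&pool->lock);        /* was: spin_lock_init(&pool->lock); */
	pool->nr_items = 0;
}

/*
 * Safe even from truly atomic contexts on PREEMPT_RT: raw_spin_lock_irq()
 * never sleeps, it spins with interrupts disabled, so the critical section
 * must stay short and bounded.
 */
static void example_pool_add_item(struct example_pool *pool)
{
	raw_spin_lock_irq(&pool->lock);         /* was: spin_lock_irq(&pool->lock); */
	pool->nr_items++;
	raw_spin_unlock_irq(&pool->lock);       /* was: spin_unlock_irq(&pool->lock); */
}

This is also why the commit message stresses bounded hold times: on PREEMPT_RT, time spent inside a raw spinlock critical section adds directly to worst-case scheduling latency.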
@@ -145,7 +145,7 @@ enum {
/* struct worker is defined in workqueue_internal.h */
struct worker_pool {
-spinlock_t lock; /* the pool lock */
+raw_spinlock_t lock; /* the pool lock */
int cpu; /* I: the associated cpu */
int node; /* I: the associated node ID */
int id; /* I: pool ID */
@@ -300,7 +300,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
@@ -827,7 +827,7 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
* Wake up the first idle worker of @pool.
*
* CONTEXT:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -882,7 +882,7 @@ void wq_worker_sleeping(struct task_struct *task)
return;
worker->sleeping = 1;
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/*
* The counterpart of the following dec_and_test, implied mb,
@@ -901,7 +901,7 @@ void wq_worker_sleeping(struct task_struct *task)
if (next)
wake_up_process(next->task);
}
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
}
/**
@@ -912,7 +912,7 @@ void wq_worker_sleeping(struct task_struct *task)
* the scheduler to get a worker's last known identity.
*
* CONTEXT:
-* spin_lock_irq(rq->lock)
+* raw_spin_lock_irq(rq->lock)
*
* This function is called during schedule() when a kworker is going
* to sleep. It's used by psi to identify aggregation workers during
@@ -943,7 +943,7 @@ work_func_t wq_worker_last_func(struct task_struct *task)
* Set @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
-* spin_lock_irq(pool->lock)
+* raw_spin_lock_irq(pool->lock)
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
@@ -968,7 +968,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
* Clear @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
-* spin_lock_irq(pool->lock)
+* raw_spin_lock_irq(pool->lock)
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
@@ -1016,7 +1016,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
* actually occurs, it should be easy to locate the culprit work function.
*
* CONTEXT:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*
* Return:
* Pointer to worker which is executing @work if found, %NULL
@@ -1051,7 +1051,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
* nested inside outer list_for_each_entry_safe().
*
* CONTEXT:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void move_linked_works(struct work_struct *work, struct list_head *head,
struct work_struct **nextp)
@@ -1129,9 +1129,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
-spin_lock_irq(&pwq->pool->lock);
+raw_spin_lock_irq(&pwq->pool->lock);
put_pwq(pwq);
-spin_unlock_irq(&pwq->pool->lock);
+raw_spin_unlock_irq(&pwq->pool->lock);
}
}
@@ -1164,7 +1164,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
* decrement nr_in_flight of its pwq and handle workqueue flushing.
*
* CONTEXT:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
@@ -1263,7 +1263,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!pool)
goto fail;
-spin_lock(&pool->lock);
+raw_spin_lock(&pool->lock);
/*
* work->data is guaranteed to point to pwq only while the work
* item is queued on pwq->wq, and both updating work->data to point
@@ -1292,11 +1292,11 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
/* work->data points to pwq iff queued, point to pool */
set_work_pool_and_keep_pending(work, pool->id);
-spin_unlock(&pool->lock);
+raw_spin_unlock(&pool->lock);
rcu_read_unlock();
return 1;
}
-spin_unlock(&pool->lock);
+raw_spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
local_irq_restore(*flags);
@@ -1317,7 +1317,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
* work_struct flags.
*
* CONTEXT:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct list_head *head, unsigned int extra_flags)
@@ -1434,7 +1434,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (last_pool && last_pool != pwq->pool) {
struct worker *worker;
-spin_lock(&last_pool->lock);
+raw_spin_lock(&last_pool->lock);
worker = find_worker_executing_work(last_pool, work);
@@ -1442,11 +1442,11 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
pwq = worker->current_pwq;
} else {
/* meh... not running there, queue here */
-spin_unlock(&last_pool->lock);
+raw_spin_unlock(&last_pool->lock);
-spin_lock(&pwq->pool->lock);
+raw_spin_lock(&pwq->pool->lock);
}
} else {
-spin_lock(&pwq->pool->lock);
+raw_spin_lock(&pwq->pool->lock);
}
/*
@@ -1459,7 +1459,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
*/
if (unlikely(!pwq->refcnt)) {
if (wq->flags & WQ_UNBOUND) {
-spin_unlock(&pwq->pool->lock);
+raw_spin_unlock(&pwq->pool->lock);
cpu_relax();
goto retry;
}
@@ -1491,7 +1491,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
insert_work(pwq, work, worklist, work_flags);
out:
-spin_unlock(&pwq->pool->lock);
+raw_spin_unlock(&pwq->pool->lock);
rcu_read_unlock();
}
@@ -1760,7 +1760,7 @@ EXPORT_SYMBOL(queue_rcu_work);
* necessary.
*
* LOCKING:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void worker_enter_idle(struct worker *worker)
{
@@ -1800,7 +1800,7 @@ static void worker_enter_idle(struct worker *worker)
* @worker is leaving idle state. Update stats.
*
* LOCKING:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void worker_leave_idle(struct worker *worker)
{
@@ -1938,11 +1938,11 @@ static struct worker *create_worker(struct worker_pool *pool)
worker_attach_to_pool(worker, pool);
/* start the newly created worker */
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
worker->pool->nr_workers++;
worker_enter_idle(worker);
wake_up_process(worker->task);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
return worker;
@@ -1961,7 +1961,7 @@ static struct worker *create_worker(struct worker_pool *pool)
* be idle.
*
* CONTEXT:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void destroy_worker(struct worker *worker)
{
@@ -1987,7 +1987,7 @@ static void idle_worker_timeout(struct timer_list *t)
{
struct worker_pool *pool = from_timer(pool, t, idle_timer);
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
while (too_many_workers(pool)) {
struct worker *worker;
@@ -2005,7 +2005,7 @@ static void idle_worker_timeout(struct timer_list *t)
destroy_worker(worker);
}
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
}
static void send_mayday(struct work_struct *work)
@@ -2036,8 +2036,8 @@ static void pool_mayday_timeout(struct timer_list *t)
struct worker_pool *pool = from_timer(pool, t, mayday_timer);
struct work_struct *work;
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
-spin_lock(&wq_mayday_lock); /* for wq->maydays */
+raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
if (need_to_create_worker(pool)) {
/*
@@ -2050,8 +2050,8 @@ static void pool_mayday_timeout(struct timer_list *t)
send_mayday(work);
}
-spin_unlock(&wq_mayday_lock);
+raw_spin_unlock(&wq_mayday_lock);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
@@ -2070,7 +2070,7 @@ static void pool_mayday_timeout(struct timer_list *t)
* may_start_working() %true.
*
* LOCKING:
-* spin_lock_irq(pool->lock) which may be released and regrabbed
+* raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*/
@@ -2079,7 +2079,7 @@ __releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2095,7 +2095,7 @@ __acquires(&pool->lock)
}
del_timer_sync(&pool->mayday_timer);
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/*
* This is necessary even after a new worker was just successfully
* created as @pool->lock was dropped and the new worker might have
@@ -2118,7 +2118,7 @@ __acquires(&pool->lock)
* and may_start_working() is true.
*
* CONTEXT:
-* spin_lock_irq(pool->lock) which may be released and regrabbed
+* raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
@@ -2157,7 +2157,7 @@ static bool manage_workers(struct worker *worker)
* call this function to process a work.
*
* CONTEXT:
-* spin_lock_irq(pool->lock) which is released and regrabbed.
+* raw_spin_lock_irq(pool->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
@@ -2239,7 +2239,7 @@ __acquires(&pool->lock)
*/
set_work_pool_and_clear_pending(work, pool->id);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
@@ -2294,7 +2294,7 @@ __acquires(&pool->lock)
*/
cond_resched();
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/* clear cpu intensive status */
if (unlikely(cpu_intensive))
@@ -2320,7 +2320,7 @@ __acquires(&pool->lock)
* fetches a work from the top and executes it.
*
* CONTEXT:
-* spin_lock_irq(pool->lock) which may be released and regrabbed
+* raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
@@ -2362,11 +2362,11 @@ static int worker_thread(void *__worker)
/* tell the scheduler that this is a workqueue worker */
set_pf_worker(true);
woke_up:
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/* am I supposed to die? */
if (unlikely(worker->flags & WORKER_DIE)) {
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
WARN_ON_ONCE(!list_empty(&worker->entry));
set_pf_worker(false);
@@ -2432,7 +2432,7 @@ static int worker_thread(void *__worker)
*/
worker_enter_idle(worker);
__set_current_state(TASK_IDLE);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
schedule();
goto woke_up;
}
@@ -2486,7 +2486,7 @@ static int rescuer_thread(void *__rescuer)
should_stop = kthread_should_stop();
/* see whether any pwq is asking for help */
-spin_lock_irq(&wq_mayday_lock);
+raw_spin_lock_irq(&wq_mayday_lock);
while (!list_empty(&wq->maydays)) {
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
@@ -2498,11 +2498,11 @@ static int rescuer_thread(void *__rescuer)
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
-spin_unlock_irq(&wq_mayday_lock);
+raw_spin_unlock_irq(&wq_mayday_lock);
worker_attach_to_pool(rescuer, pool);
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/*
* Slurp in all works issued via this workqueue and
@@ -2531,7 +2531,7 @@ static int rescuer_thread(void *__rescuer)
* incur MAYDAY_INTERVAL delay inbetween.
*/
if (need_to_create_worker(pool)) {
-spin_lock(&wq_mayday_lock);
+raw_spin_lock(&wq_mayday_lock);
/*
* Queue iff we aren't racing destruction
* and somebody else hasn't queued it already.
@@ -2540,7 +2540,7 @@ static int rescuer_thread(void *__rescuer)
get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
}
-spin_unlock(&wq_mayday_lock);
+raw_spin_unlock(&wq_mayday_lock);
}
}
@@ -2558,14 +2558,14 @@ static int rescuer_thread(void *__rescuer)
if (need_more_worker(pool))
wake_up_worker(pool);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
worker_detach_from_pool(rescuer);
-spin_lock_irq(&wq_mayday_lock);
+raw_spin_lock_irq(&wq_mayday_lock);
}
-spin_unlock_irq(&wq_mayday_lock);
+raw_spin_unlock_irq(&wq_mayday_lock);
if (should_stop) {
__set_current_state(TASK_RUNNING);
@@ -2645,7 +2645,7 @@ static void wq_barrier_func(struct work_struct *work)
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
-* spin_lock_irq(pool->lock).
+* raw_spin_lock_irq(pool->lock).
*/
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
@@ -2732,7 +2732,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2749,7 +2749,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
pwq->work_color = work_color;
}
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
}
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
@@ -2949,9 +2949,9 @@ void drain_workqueue(struct workqueue_struct *wq)
for_each_pwq(pwq, wq) {
bool drained;
-spin_lock_irq(&pwq->pool->lock);
+raw_spin_lock_irq(&pwq->pool->lock);
drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
-spin_unlock_irq(&pwq->pool->lock);
+raw_spin_unlock_irq(&pwq->pool->lock);
if (drained)
continue;
@@ -2987,7 +2987,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
return false;
}
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -3003,7 +3003,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
check_flush_dependency(pwq->wq, work);
insert_wq_barrier(pwq, barr, work, worker);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
/*
* Force a lock recursion deadlock when using flush_work() inside a
@@ -3022,7 +3022,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
rcu_read_unlock();
return true;
already_gone:
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
rcu_read_unlock();
return false;
}
@@ -3415,7 +3415,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
*/
static int init_worker_pool(struct worker_pool *pool)
{
-spin_lock_init(&pool->lock);
+raw_spin_lock_init(&pool->lock);
pool->id = -1;
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
@@ -3507,10 +3507,10 @@ static void rcu_free_pool(struct rcu_head *rcu)
/* This returns with the lock held on success (pool manager is inactive). */
static bool wq_manager_inactive(struct worker_pool *pool)
{
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
if (pool->flags & POOL_MANAGER_ACTIVE) {
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
return false;
}
return true;
@@ -3561,7 +3561,7 @@ static void put_unbound_pool(struct worker_pool *pool)
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
mutex_lock(&wq_pool_attach_mutex);
if (!list_empty(&pool->workers))
@@ -3717,7 +3717,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
return;
/* this function can be called during early boot w/ irq disabled */
-spin_lock_irqsave(&pwq->pool->lock, flags);
+raw_spin_lock_irqsave(&pwq->pool->lock, flags);
/*
* During [un]freezing, the caller is responsible for ensuring that
@@ -3740,7 +3740,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
pwq->max_active = 0;
}
-spin_unlock_irqrestore(&pwq->pool->lock, flags);
+raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -4142,9 +4142,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
use_dfl_pwq:
mutex_lock(&wq->mutex);
-spin_lock_irq(&wq->dfl_pwq->pool->lock);
+raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
get_pwq(wq->dfl_pwq);
-spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
@@ -4373,9 +4373,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct worker *rescuer = wq->rescuer;
/* this prevents new queueing */
-spin_lock_irq(&wq_mayday_lock);
+raw_spin_lock_irq(&wq_mayday_lock);
wq->rescuer = NULL;
-spin_unlock_irq(&wq_mayday_lock);
+raw_spin_unlock_irq(&wq_mayday_lock);
/* rescuer will empty maydays list before exiting */
kthread_stop(rescuer->task);
@@ -4389,18 +4389,18 @@ void destroy_workqueue(struct workqueue_struct *wq)
mutex_lock(&wq_pool_mutex);
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
-spin_lock_irq(&pwq->pool->lock);
+raw_spin_lock_irq(&pwq->pool->lock);
if (WARN_ON(pwq_busy(pwq))) {
pr_warn("%s: %s has the following busy pwq\n",
__func__, wq->name);
show_pwq(pwq);
-spin_unlock_irq(&pwq->pool->lock);
+raw_spin_unlock_irq(&pwq->pool->lock);
mutex_unlock(&wq->mutex);
mutex_unlock(&wq_pool_mutex);
show_workqueue_state();
return;
}
-spin_unlock_irq(&pwq->pool->lock);
+raw_spin_unlock_irq(&pwq->pool->lock);
}
mutex_unlock(&wq->mutex);
mutex_unlock(&wq_pool_mutex);
@@ -4571,10 +4571,10 @@ unsigned int work_busy(struct work_struct *work)
rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
-spin_lock_irqsave(&pool->lock, flags);
+raw_spin_lock_irqsave(&pool->lock, flags);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
-spin_unlock_irqrestore(&pool->lock, flags);
+raw_spin_unlock_irqrestore(&pool->lock, flags);
}
rcu_read_unlock();
@@ -4781,10 +4781,10 @@ void show_workqueue_state(void)
pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
for_each_pwq(pwq, wq) {
-spin_lock_irqsave(&pwq->pool->lock, flags);
+raw_spin_lock_irqsave(&pwq->pool->lock, flags);
if (pwq->nr_active || !list_empty(&pwq->delayed_works))
show_pwq(pwq);
-spin_unlock_irqrestore(&pwq->pool->lock, flags);
+raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4798,7 +4798,7 @@ void show_workqueue_state(void)
struct worker *worker;
bool first = true;
-spin_lock_irqsave(&pool->lock, flags);
+raw_spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
@@ -4817,7 +4817,7 @@ void show_workqueue_state(void)
}
pr_cont("\n");
next_pool:
-spin_unlock_irqrestore(&pool->lock, flags);
+raw_spin_unlock_irqrestore(&pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4847,7 +4847,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
struct worker_pool *pool = worker->pool;
if (pool) {
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/*
* ->desc tracks information (wq name or
* set_worker_desc()) for the latest execution. If
@@ -4861,7 +4861,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
scnprintf(buf + off, size - off, "-%s",
worker->desc);
}
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
}
}
@@ -4892,7 +4892,7 @@ static void unbind_workers(int cpu)
for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&wq_pool_attach_mutex);
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
/*
* We've blocked all attach/detach operations. Make all workers
@@ -4906,7 +4906,7 @@ static void unbind_workers(int cpu)
pool->flags |= POOL_DISASSOCIATED;
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
mutex_unlock(&wq_pool_attach_mutex);
/*
@@ -4932,9 +4932,9 @@ static void unbind_workers(int cpu)
* worker blocking could lead to lengthy stalls. Kick off
* unbound chain execution of currently pending work items.
*/
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
wake_up_worker(pool);
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
}
}
@@ -4961,7 +4961,7 @@ static void rebind_workers(struct worker_pool *pool)
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
-spin_lock_irq(&pool->lock);
+raw_spin_lock_irq(&pool->lock);
pool->flags &= ~POOL_DISASSOCIATED;
@@ -5000,7 +5000,7 @@ static void rebind_workers(struct worker_pool *pool)
WRITE_ONCE(worker->flags, worker_flags);
}
-spin_unlock_irq(&pool->lock);
+raw_spin_unlock_irq(&pool->lock);
}
/**
...