Commit 06987dad authored by Linus Torvalds

Merge branch 'for-4.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fix from Tejun Heo:
 "This is a fix for an old bug in workqueue. Workqueue used a mutex to
  arbitrate who gets to be the manager of a pool. When the manager role
  gets released, the mutex gets unlocked while holding the pool's
  irqsafe spinlock. This can lead to deadlocks as mutex's internal
  spinlock isn't irqsafe. This got discovered by recent fixes to mutex
  lockdep annotations.

  The fix is a bit invasive for rc6 but if anything were wrong with the
  fix it would likely have already blown up in -next, and we want the
  fix in -stable anyway"

* 'for-4.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: replace pool->manager_arb mutex with a flag
parents 2f1b11c5 692b4825
...@@ -68,6 +68,7 @@ enum { ...@@ -68,6 +68,7 @@ enum {
* attach_mutex to avoid changing binding state while * attach_mutex to avoid changing binding state while
* worker_attach_to_pool() is in progress. * worker_attach_to_pool() is in progress.
*/ */
POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
/* worker flags */ /* worker flags */
...@@ -165,7 +166,6 @@ struct worker_pool { ...@@ -165,7 +166,6 @@ struct worker_pool {
/* L: hash of busy workers */ /* L: hash of busy workers */
/* see manage_workers() for details on the two manager mutexes */ /* see manage_workers() for details on the two manager mutexes */
struct mutex manager_arb; /* manager arbitration */
struct worker *manager; /* L: purely informational */ struct worker *manager; /* L: purely informational */
struct mutex attach_mutex; /* attach/detach exclusion */ struct mutex attach_mutex; /* attach/detach exclusion */
struct list_head workers; /* A: attached workers */ struct list_head workers; /* A: attached workers */
...@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf; ...@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
static LIST_HEAD(workqueues); /* PR: list of all workqueues */ static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */ static bool workqueue_freezing; /* PL: have wqs started freezing? */
...@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool) ...@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
/* Do we have too many workers and should some go away? */ /* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool) static bool too_many_workers(struct worker_pool *pool)
{ {
bool managing = mutex_is_locked(&pool->manager_arb); bool managing = pool->flags & POOL_MANAGER_ACTIVE;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle; int nr_busy = pool->nr_workers - nr_idle;
...@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker) ...@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
{ {
struct worker_pool *pool = worker->pool; struct worker_pool *pool = worker->pool;
/* if (pool->flags & POOL_MANAGER_ACTIVE)
* Anyone who successfully grabs manager_arb wins the arbitration
* and becomes the manager. mutex_trylock() on pool->manager_arb
* failure while holding pool->lock reliably indicates that someone
* else is managing the pool and the worker which failed trylock
* can proceed to executing work items. This means that anyone
* grabbing manager_arb is responsible for actually performing
* manager duties. If manager_arb is grabbed and released without
* actual management, the pool may stall indefinitely.
*/
if (!mutex_trylock(&pool->manager_arb))
return false; return false;
pool->flags |= POOL_MANAGER_ACTIVE;
pool->manager = worker; pool->manager = worker;
maybe_create_worker(pool); maybe_create_worker(pool);
pool->manager = NULL; pool->manager = NULL;
mutex_unlock(&pool->manager_arb); pool->flags &= ~POOL_MANAGER_ACTIVE;
wake_up(&wq_manager_wait);
return true; return true;
} }
...@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool) ...@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool)
setup_timer(&pool->mayday_timer, pool_mayday_timeout, setup_timer(&pool->mayday_timer, pool_mayday_timeout,
(unsigned long)pool); (unsigned long)pool);
mutex_init(&pool->manager_arb);
mutex_init(&pool->attach_mutex); mutex_init(&pool->attach_mutex);
INIT_LIST_HEAD(&pool->workers); INIT_LIST_HEAD(&pool->workers);
...@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool) ...@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool)
hash_del(&pool->hash_node); hash_del(&pool->hash_node);
/* /*
* Become the manager and destroy all workers. Grabbing * Become the manager and destroy all workers. This prevents
* manager_arb prevents @pool's workers from blocking on * @pool's workers from blocking on attach_mutex. We're the last
* attach_mutex. * manager and @pool gets freed with the flag set.
*/ */
mutex_lock(&pool->manager_arb);
spin_lock_irq(&pool->lock); spin_lock_irq(&pool->lock);
wait_event_lock_irq(wq_manager_wait,
!(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
pool->flags |= POOL_MANAGER_ACTIVE;
while ((worker = first_idle_worker(pool))) while ((worker = first_idle_worker(pool)))
destroy_worker(worker); destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle); WARN_ON(pool->nr_workers || pool->nr_idle);
...@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool) ...@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool)
if (pool->detach_completion) if (pool->detach_completion)
wait_for_completion(pool->detach_completion); wait_for_completion(pool->detach_completion);
mutex_unlock(&pool->manager_arb);
/* shut down the timers */ /* shut down the timers */
del_timer_sync(&pool->idle_timer); del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer); del_timer_sync(&pool->mayday_timer);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment