Commit cd2440d6 authored by Petr Mladek, committed by Tejun Heo

workqueue: Print backtraces from CPUs with hung CPU bound workqueues

The workqueue watchdog reports a lockup when there has been no progress
in a worker pool for a long time. Progress means that a pending work
item starts being processed.
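
The detection relies on the kernel's wraparound-safe jiffies comparison.
Simplified (dropping the typecheck() used in include/linux/jiffies.h),
the idiom behind the time_after(now, ts + thresh) check in the hunks
below is:

	/* True when "a" is after "b", even across a jiffies wraparound. */
	#define time_after(a, b)	((long)((b) - (a)) < 0)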

Worker pools for unbound workqueues always wake up an idle worker and
try to process the work immediately. The last idle worker has to create
a new worker first. A stall can happen only when the new worker could
not be created, in which case an error should get printed. Another
possible cause is too high a load; in that case, the workers are victims
of a global system problem. An illustrative sketch follows.
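
For illustration (not part of this patch), an unbound queue is one
created with WQ_UNBOUND; a minimal sketch using the real workqueue API,
where example_work is a hypothetical, already-initialized work item:

	static int __init example_init(void)
	{
		struct workqueue_struct *unbound_wq;

		/* Work queued here is served by an unbound pool, which
		 * wakes (or creates) an idle worker immediately. */
		unbound_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);
		if (!unbound_wq)
			return -ENOMEM;

		queue_work(unbound_wq, &example_work);
		return 0;
	}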

Worker pools for CPU-bound workqueues are designed for lightweight
work items that do not need much CPU time. Work items are processed
one by one by a single worker; a new worker is used only when a work
item sleeps. This creates one additional scenario: a stall can happen
when a CPU-bound workqueue is used for CPU-intensive work.
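
For illustration (again, hypothetical and not part of the patch), a work
item like the following, queued on the CPU-bound system_wq, triggers
exactly this scenario: the busy loop never sleeps, so no new worker is
created and every other item on the same pool starves until the
watchdog fires:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	/* Hypothetical CPU hog: spins for a minute without sleeping. */
	static void hog_fn(struct work_struct *work)
	{
		unsigned long end = jiffies + 60 * HZ;

		while (time_before(jiffies, end))
			cpu_relax();	/* never sleeps: the pool stays blocked */
	}
	static DECLARE_WORK(hog_work, hog_fn);

	/* queue_work(system_wq, &hog_work); -- system_wq is CPU bound */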

More precisely, the stall is detected when a CPU-bound worker stays in
the TASK_RUNNING state for too long. In this case, it is useful to see
a backtrace from the problematic worker.
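
The running-state check used below, task_is_running(), reduces to a
single state comparison; simplified from include/linux/sched.h:

	/* A task is "running" when its scheduler state is TASK_RUNNING. */
	#define task_is_running(task) \
		(READ_ONCE((task)->__state) == TASK_RUNNING)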

The information about how long a worker has been in the running state
is not available. But CPU-bound worker pools, by definition, do not
have many workers in the running state, and typically only a few pools
are blocked.

It should be acceptable to print backtraces from all workers in the
TASK_RUNNING state in the stalled worker pools. The number of false
positives should be very low.
Signed-off-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 4c0736a7
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,6 +49,7 @@
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
 #include <linux/sched/isolation.h>
+#include <linux/sched/debug.h>
 #include <linux/nmi.h>
 #include <linux/kvm_para.h>
@@ -141,6 +142,8 @@ enum {
  * WR: wq->mutex protected for writes.  RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
+ *
+ * WD: Used internally by the watchdog.
  */

 /* struct worker is defined in workqueue_internal.h */
@@ -153,6 +156,7 @@ struct worker_pool {
 	unsigned int		flags;		/* X: flags */

 	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
+	bool			cpu_stall;	/* WD: stalled cpu bound pool */

 	/*
 	 * The counter is incremented in a process context on the associated CPU
@@ -5976,6 +5980,57 @@ static struct timer_list wq_watchdog_timer;
 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;

+/*
+ * Show workers that might prevent the processing of pending work items.
+ * The only candidates are CPU-bound workers in the running state.
+ * Pending work items should be handled by another idle worker
+ * in all other situations.
+ */
+static void show_cpu_pool_hog(struct worker_pool *pool)
+{
+	struct worker *worker;
+	unsigned long flags;
+	int bkt;
+
+	raw_spin_lock_irqsave(&pool->lock, flags);
+
+	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
+		if (task_is_running(worker->task)) {
+			/*
+			 * Defer printing to avoid deadlocks in console
+			 * drivers that queue work while holding locks
+			 * also taken in their write paths.
+			 */
+			printk_deferred_enter();
+
+			pr_info("pool %d:\n", pool->id);
+			sched_show_task(worker->task);
+
+			printk_deferred_exit();
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static void show_cpu_pools_hogs(void)
+{
+	struct worker_pool *pool;
+	int pi;
+
+	pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
+
+	rcu_read_lock();
+
+	for_each_pool(pool, pi) {
+		if (pool->cpu_stall)
+			show_cpu_pool_hog(pool);
+	}
+
+	rcu_read_unlock();
+}
+
 static void wq_watchdog_reset_touched(void)
 {
 	int cpu;
@@ -5989,6 +6044,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 {
 	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
 	bool lockup_detected = false;
+	bool cpu_pool_stall = false;
 	unsigned long now = jiffies;
 	struct worker_pool *pool;
 	int pi;
@@ -6001,6 +6057,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	for_each_pool(pool, pi) {
 		unsigned long pool_ts, touched, ts;

+		pool->cpu_stall = false;
 		if (list_empty(&pool->worklist))
 			continue;
@@ -6025,11 +6082,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 		/* did we stall? */
 		if (time_after(now, ts + thresh)) {
 			lockup_detected = true;
+			if (pool->cpu >= 0) {
+				pool->cpu_stall = true;
+				cpu_pool_stall = true;
+			}
 			pr_emerg("BUG: workqueue lockup - pool");
 			pr_cont_pool_info(pool);
 			pr_cont(" stuck for %us!\n",
 				jiffies_to_msecs(now - pool_ts) / 1000);
 		}
 	}

 	rcu_read_unlock();
@@ -6037,6 +6100,9 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	if (lockup_detected)
 		show_all_workqueues();

+	if (cpu_pool_stall)
+		show_cpu_pools_hogs();
+
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }