Commit b13b3b70 authored by Don Zickus, committed by Greg Kroah-Hartman

kernel/watchdog: prevent false hardlockup on overloaded system


[ Upstream commit b94f5118 ]

On an overloaded system, it is possible that a change in the watchdog
threshold can be delayed long enough to trigger a false positive.

This can easily be achieved by having a cpu spinning indefinitely on a
task, while another cpu updates watchdog threshold.

What happens is while trying to park the watchdog threads, the hrtimers
on the other cpus trigger and reprogram themselves with the new slower
watchdog threshold.  Meanwhile, the nmi watchdog is still programmed
with the old faster threshold.

Because the one cpu is blocked, it prevents the thread parking on the
other cpus from completing, which is needed to shutdown the nmi watchdog
and reprogram it correctly.  As a result, a false positive from the nmi
watchdog is reported.

Fix this by setting a park_in_progress flag to block all lockups until
the parking is complete.

Fix provided by Ulrich Obergfell.

[akpm@linux-foundation.org: s/park_in_progress/watchdog_park_in_progress/]
Link: http://lkml.kernel.org/r/1481041033-192236-1-git-send-email-dzickus@redhat.com
Signed-off-by: Don Zickus <dzickus@redhat.com>
Reviewed-by: Aaron Tomlin <atomlin@redhat.com>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0ce66ee6
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
......
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	int duration;
 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return HRTIMER_NORESTART;
+
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
 	int cpu, ret = 0;
 
+	atomic_set(&watchdog_park_in_progress, 1);
+
 	for_each_watchdog_cpu(cpu) {
 		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
 		if (ret)
 			break;
 	}
+
+	atomic_set(&watchdog_park_in_progress, 0);
+
 	return ret;
 }
......
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return;
+
 	if (__this_cpu_read(watchdog_nmi_touch) == true) {
 		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment