Commit 6592ad2f authored by Thomas Gleixner, committed by Ingo Molnar

watchdog/core, powerpc: Make watchdog_nmi_reconfigure() two stage

Both the perf reconfiguration and the powerpc watchdog_nmi_reconfigure()
need to be done in two steps.

     1) Stop all NMIs
     2) Read the new parameters and start NMIs

Right now watchdog_nmi_reconfigure() is a combination of both. To allow a
clean reconfiguration, add a 'run' argument and split the functionality in
powerpc.
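
In other words, the intended core call order is: watchdog_nmi_reconfigure(false), update the watchdog variables, watchdog_nmi_reconfigure(true). A minimal standalone sketch of that protocol (not part of this patch; update_variables() and the stub bodies below are hypothetical stand-ins for the real core and arch hooks) is:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the arch hook: stop the NMI watchdogs when run == false,
     * re-read the parameters and start them again when run == true. */
    static void watchdog_nmi_reconfigure(bool run)
    {
            printf("%s NMI watchdogs\n", run ? "start" : "stop");
    }

    /* Stand-in for the core updating watchdog_enabled, watchdog_thresh and
     * watchdog_cpumask between the two calls. */
    static void update_variables(void)
    {
    }

    int main(void)
    {
            watchdog_nmi_reconfigure(false); /* 1) stop all NMIs */
            update_variables();              /* read the new parameters */
            watchdog_nmi_reconfigure(true);  /* 2) start NMIs with the new parameters */
            return 0;
    }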
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20170912194147.862865570@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7feeb9cd
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -355,17 +355,18 @@ static void watchdog_calc_timeouts(void)
 	wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
 }
 
-void watchdog_nmi_reconfigure(void)
+void watchdog_nmi_reconfigure(bool run)
 {
 	int cpu;
 
-	watchdog_calc_timeouts();
-
-	for_each_cpu(cpu, &wd_cpus_enabled)
-		stop_wd_on_cpu(cpu);
-
-	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
-		start_wd_on_cpu(cpu);
+	if (!run) {
+		for_each_cpu(cpu, &wd_cpus_enabled)
+			stop_wd_on_cpu(cpu);
+	} else {
+		watchdog_calc_timeouts();
+		for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
+			start_wd_on_cpu(cpu);
+	}
 }
 
 /*
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -103,6 +103,8 @@ static inline void arch_touch_nmi_watchdog(void) {}
 #endif
 #endif
 
+void watchdog_nmi_reconfigure(bool run);
+
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
  *
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -112,17 +112,25 @@ void __weak watchdog_nmi_disable(unsigned int cpu)
 	hardlockup_detector_perf_disable();
 }
 
-/*
- * watchdog_nmi_reconfigure can be implemented to be notified after any
- * watchdog configuration change. The arch hardlockup watchdog should
- * respond to the following variables:
+/**
+ * watchdog_nmi_reconfigure - Optional function to reconfigure NMI watchdogs
+ * @run:	If false stop the watchdogs on all enabled CPUs
+ *		If true start the watchdogs on all enabled CPUs
+ *
+ * The core call order is:
+ * watchdog_nmi_reconfigure(false);
+ * update_variables();
+ * watchdog_nmi_reconfigure(true);
+ *
+ * The second call which starts the watchdogs again guarantees that the
+ * following variables are stable across the call.
  * - watchdog_enabled
  * - watchdog_thresh
  * - watchdog_cpumask
- * - sysctl_hardlockup_all_cpu_backtrace
- * - hardlockup_panic
+ *
+ * After the call the variables can be changed again.
  */
-void __weak watchdog_nmi_reconfigure(void) { }
+void __weak watchdog_nmi_reconfigure(bool run) { }
 
 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
@@ -515,10 +523,12 @@ static void softlockup_unpark_threads(void)
 
 static void softlockup_reconfigure_threads(bool enabled)
 {
+	watchdog_nmi_reconfigure(false);
 	softlockup_park_all_threads();
 	set_sample_period();
 	if (enabled)
 		softlockup_unpark_threads();
+	watchdog_nmi_reconfigure(true);
 }
 
 /*
@@ -559,7 +569,11 @@ static inline void watchdog_unpark_threads(void) { }
 static inline int watchdog_enable_all_cpus(void) { return 0; }
 static inline void watchdog_disable_all_cpus(void) { }
 static inline void softlockup_init_threads(void) { }
-static inline void softlockup_reconfigure_threads(bool enabled) { }
+static void softlockup_reconfigure_threads(bool enabled)
+{
+	watchdog_nmi_reconfigure(false);
+	watchdog_nmi_reconfigure(true);
+}
 #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
 
 static void __lockup_detector_cleanup(void)
@@ -599,7 +613,6 @@ static void proc_watchdog_update(void)
 	/* Remove impossible cpus to keep sysctl output clean. */
 	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
 	softlockup_reconfigure_threads(watchdog_enabled && watchdog_thresh);
-	watchdog_nmi_reconfigure();
 }
 
 /*