Commit 51889d22 authored by Petr Mladek

Merge branch 'rework/kthreads' into for-linus

parents 93d17c1c 07a22b61
@@ -581,7 +581,6 @@ void __handle_sysrq(int key, bool check_mask)
rcu_sysrq_start();
rcu_read_lock();
printk_prefer_direct_enter();
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -623,7 +622,6 @@ void __handle_sysrq(int key, bool check_mask)
pr_cont("\n");
console_loglevel = orig_log_level;
}
printk_prefer_direct_exit();
rcu_read_unlock();
rcu_sysrq_end();
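For context, the loglevel handling that the truncated comment above describes follows a save/raise/restore pattern. A paraphrased sketch of the surrounding __handle_sysrq() code (illustrative, not the verbatim upstream source):

    int orig_log_level = console_loglevel;    /* save the caller's level */

    /* raise the apparent loglevel so the sysrq header is always visible */
    console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;

    /* ... print the header and dispatch the handler for the pressed key ... */

    console_loglevel = orig_log_level;        /* restore on the way out */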
......
@@ -16,7 +16,6 @@
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/mutex.h>
struct vc_data;
struct console_font_op;
@@ -154,22 +153,6 @@ struct console {
uint ospeed;
u64 seq;
unsigned long dropped;
struct task_struct *thread;
bool blocked;
/*
* The per-console lock is used by printing kthreads to synchronize
* this console with callers of console_lock(). This is necessary in
* order to allow printing kthreads to run in parallel to each other,
* while each safely accessing the @blocked field and synchronizing
* against direct printing via console_lock/console_unlock.
*
* Note: For synchronizing against direct printing via
* console_trylock/console_unlock, see the static global
* variable @console_kthreads_active.
*/
struct mutex lock;
void *data;
struct console *next;
};
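A minimal sketch of how a per-console printing kthread was intended to use the @lock and @blocked members described above; it assumes printk internals such as the log_wait waitqueue and the prb_read_valid() ringbuffer helper, and paraphrases the reverted design rather than reproducing the removed code:

    /* hypothetical helper: a record is pending and direct printing is idle */
    static bool printer_should_wake(struct console *con)
    {
        return !con->blocked && prb_read_valid(prb, con->seq, NULL);
    }

    static int printk_kthread_func(void *data)
    {
        struct console *con = data;

        while (!kthread_should_stop()) {
            wait_event_interruptible(log_wait, printer_should_wake(con));

            mutex_lock(&con->lock);
            if (con->blocked) {
                /* a console_lock() holder is printing directly */
                mutex_unlock(&con->lock);
                continue;
            }
            /* emit the next record to this console and advance con->seq */
            mutex_unlock(&con->lock);
        }
        return 0;
    }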
......
@@ -169,11 +169,7 @@ extern void __printk_safe_exit(void);
#define printk_deferred_enter __printk_safe_enter
#define printk_deferred_exit __printk_safe_exit
extern void printk_prefer_direct_enter(void);
extern void printk_prefer_direct_exit(void);
extern bool pr_flush(int timeout_ms, bool reset_on_progress);
extern void try_block_console_kthreads(int timeout_ms);
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -225,23 +221,11 @@ static inline void printk_deferred_exit(void)
{
}
static inline void printk_prefer_direct_enter(void)
{
}
static inline void printk_prefer_direct_exit(void)
{
}
static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
{
return true;
}
static inline void try_block_console_kthreads(int timeout_ms)
{
}
static inline int printk_ratelimit(void)
{
return 0;
......
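Of the declarations above, the printk_deferred_enter()/printk_deferred_exit() pair survives this revert. It lets a caller log while holding a lock that the console drivers themselves might take, by storing the message now and printing it later from irq_work. A hedged usage sketch; the lock name is illustrative:

    unsigned long flags;

    raw_spin_lock_irqsave(&console_adjacent_lock, flags);  /* illustrative lock */
    printk_deferred_enter();   /* store only; flushing happens via irq_work */
    pr_warn("diagnostic emitted while holding the lock\n");
    printk_deferred_exit();
    raw_spin_unlock_irqrestore(&console_adjacent_lock, flags);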
@@ -127,8 +127,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
* complain:
*/
if (sysctl_hung_task_warnings) {
printk_prefer_direct_enter();
if (sysctl_hung_task_warnings > 0)
sysctl_hung_task_warnings--;
pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
@@ -144,8 +142,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
if (sysctl_hung_task_all_cpu_backtrace)
hung_task_show_all_bt = true;
printk_prefer_direct_exit();
}
touch_nmi_watchdog();
@@ -208,17 +204,12 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
}
unlock:
rcu_read_unlock();
if (hung_task_show_lock) {
printk_prefer_direct_enter();
if (hung_task_show_lock)
debug_show_all_locks();
printk_prefer_direct_exit();
}
if (hung_task_show_all_bt) {
hung_task_show_all_bt = false;
printk_prefer_direct_enter();
trigger_all_cpu_backtrace();
printk_prefer_direct_exit();
}
if (hung_task_call_panic)
......
@@ -297,7 +297,6 @@ void panic(const char *fmt, ...)
* unfortunately means it may not be hardened to work in a
* panic situation.
*/
try_block_console_kthreads(10000);
smp_send_stop();
} else {
/*
@@ -305,7 +304,6 @@
* kmsg_dump, we will need architecture dependent extra
* works in addition to stopping other CPUs.
*/
try_block_console_kthreads(10000);
crash_smp_send_stop();
}
@@ -605,8 +603,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
{
disable_trace_on_warning();
printk_prefer_direct_enter();
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
@@ -636,8 +632,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
printk_prefer_direct_exit();
}
#ifndef __WARN_FLAGS
......
@@ -20,8 +20,6 @@ enum printk_info_flags {
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
extern bool block_console_kthreads;
__printf(4, 0)
int vprintk_store(int facility, int level,
const struct dev_printk_info *dev_info,
......
This diff is collapsed.
@@ -8,9 +8,7 @@
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/console.h>
#include <linux/kprobes.h>
#include <linux/delay.h>
#include "internal.h"
@@ -52,33 +50,3 @@ asmlinkage int vprintk(const char *fmt, va_list args)
return vprintk_default(fmt, args);
}
EXPORT_SYMBOL(vprintk);
/**
* try_block_console_kthreads() - Try to block console kthreads and
* make the global console_lock() available
*
* @timeout_ms: The maximum time (in ms) to wait.
*
* Prevent console kthreads from starting to process new messages. Wait
* until the global console_lock() becomes available.
*
* Context: Can be called in any context.
*/
void try_block_console_kthreads(int timeout_ms)
{
block_console_kthreads = true;
/* Do not wait when the console lock could not be safely taken. */
if (this_cpu_read(printk_context) || in_nmi())
return;
while (timeout_ms > 0) {
if (console_trylock()) {
console_unlock();
return;
}
udelay(1000);
timeout_ms -= 1;
}
}
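As the call sites elsewhere in this commit show (panic(), kernel_restart_prepare(), kernel_shutdown_prepare()), the helper was given a 10-second budget just before the system was taken down:

    /* reverted pattern from panic(): block the kthreads, then stop other CPUs */
    try_block_console_kthreads(10000);
    smp_send_stop();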
@@ -647,7 +647,6 @@ static void print_cpu_stall(unsigned long gps)
* See Documentation/RCU/stallwarn.rst for info on how to debug
* RCU CPU stall warnings.
*/
printk_prefer_direct_enter();
trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
@@ -685,7 +684,6 @@ static void print_cpu_stall(unsigned long gps)
*/
set_tsk_need_resched(current);
set_preempt_need_resched();
printk_prefer_direct_exit();
}
static void check_cpu_stall(struct rcu_data *rdp)
......
@@ -82,7 +82,6 @@ void kernel_restart_prepare(char *cmd)
{
blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
try_block_console_kthreads(10000);
usermodehelper_disable();
device_shutdown();
}
@@ -271,7 +270,6 @@ static void kernel_shutdown_prepare(enum system_states state)
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
system_state = state;
try_block_console_kthreads(10000);
usermodehelper_disable();
device_shutdown();
}
@@ -821,11 +819,9 @@ static int __orderly_reboot(void)
ret = run_cmd(reboot_cmd);
if (ret) {
printk_prefer_direct_enter();
pr_warn("Failed to start orderly reboot: forcing the issue\n");
emergency_sync();
kernel_restart(NULL);
printk_prefer_direct_exit();
}
return ret;
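run_cmd(), used above to launch the userspace reboot command, is not shown in this diff. A hedged sketch of how such a helper is typically implemented in kernel/reboot.c (an assumption, not part of this commit):

    static int run_cmd(const char *cmd)
    {
        static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
        char **argv;
        int ret;

        argv = argv_split(GFP_KERNEL, cmd, NULL);
        if (!argv)
            return -ENOMEM;

        /* spawn the command and wait only for the exec to succeed */
        ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
        argv_free(argv);
        return ret;
    }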
@@ -838,7 +834,6 @@ static int __orderly_poweroff(bool force)
ret = run_cmd(poweroff_cmd);
if (ret && force) {
printk_prefer_direct_enter();
pr_warn("Failed to start orderly shutdown: forcing the issue\n");
/*
@@ -848,7 +843,6 @@ */
*/
emergency_sync();
kernel_power_off();
printk_prefer_direct_exit();
}
return ret;
@@ -906,8 +900,6 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
*/
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
printk_prefer_direct_enter();
/*
* We have reached here after the emergency shutdown waiting period has
* expired. This means orderly_poweroff has not been able to shut off
@@ -924,8 +916,6 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
*/
pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
emergency_restart();
printk_prefer_direct_exit();
}
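The function above runs from the hw_failure_emergency_poweroff_work delayed work declared just below. A hedged sketch of how hw_failure_emergency_poweroff(), whose body is outside this diff, plausibly arms that backup path:

    static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
    {
        if (poweroff_delay_ms <= 0)
            return;
        /* fire the emergency handler if the orderly shutdown stalls */
        schedule_delayed_work(&hw_failure_emergency_poweroff_work,
                              msecs_to_jiffies(poweroff_delay_ms));
    }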
static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
@@ -964,13 +954,11 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
{
static atomic_t allow_proceed = ATOMIC_INIT(1);
printk_prefer_direct_enter();
pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
/* Shutdown should be initiated only once. */
if (!atomic_dec_and_test(&allow_proceed))
goto out;
return;
/*
* Queue a backup emergency shutdown in the event of
@@ -978,8 +966,6 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
*/
hw_failure_emergency_poweroff(ms_until_forced);
orderly_poweroff(true);
out:
printk_prefer_direct_exit();
}
EXPORT_SYMBOL_GPL(hw_protection_shutdown);
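The allow_proceed counter above is a run-once guard: atomic_dec_and_test() returns true only for the caller that moves the counter from 1 to 0, so the shutdown is initiated exactly once no matter how many protection sensors trip. The idiom in isolation:

    static atomic_t once = ATOMIC_INIT(1);

    static void do_once(void)
    {
        /* only the first caller observes the 1 -> 0 transition */
        if (!atomic_dec_and_test(&once))
            return;
        /* ... one-time action ... */
    }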
......
@@ -424,8 +424,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* Start period for the next softlockup warning. */
update_report_ts();
printk_prefer_direct_enter();
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
@@ -444,8 +442,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
if (softlockup_panic)
panic("softlockup: hung tasks");
printk_prefer_direct_exit();
}
return HRTIMER_RESTART;
......
@@ -135,8 +135,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
printk_prefer_direct_enter();
pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
this_cpu);
print_modules();
@@ -157,8 +155,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
printk_prefer_direct_exit();
__this_cpu_write(hard_watchdog_warn, true);
return;
}
......