// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/sysfs.h>
#include <linux/context_tracking.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs' backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;
static unsigned int warn_limit __read_mostly;

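/*
 * Seconds to wait before rebooting after a panic: a value > 0 waits that
 * long and then reboots, < 0 reboots immediately, and 0 means stay in the
 * panic loop forever.  Settable via the "panic=" core_param below.
 */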
int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
#define PANIC_PRINT_ALL_CPU_BT		0x00000040
unsigned long panic_print;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_panic_table[] = {
#ifdef CONFIG_SMP
	{
		.procname       = "oops_all_cpu_backtrace",
		.data           = &sysctl_oops_all_cpu_backtrace,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
#endif
	{
		.procname       = "warn_limit",
		.data           = &warn_limit,
		.maxlen         = sizeof(warn_limit),
		.mode           = 0644,
		.proc_handler   = proc_douintvec,
	},
	{ }
};

static __init int kernel_panic_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_panic_table);
	return 0;
}
late_initcall(kernel_panic_sysctls_init);
#endif

static atomic_t warn_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
}

static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);

static __init int kernel_panic_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_panic_sysfs_init);
#endif

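/* Fallback used by panic() when no driver has installed a panic_blink handler. */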
static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak __noreturn panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic.  Architecture dependent code may override this
 * with more suitable version.  For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
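
/*
 * Example (illustrative only): an architecture's hard-lockup detector
 * running in NMI context might report an unrecoverable condition with
 *
 *	nmi_panic(regs, "Hard LOCKUP");
 *
 * so that only the first CPU to panic proceeds while later callers spin
 * in nmi_panic_self_stop().
 */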

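/*
 * Dump the optional debug info selected by the panic_print bitmask.
 * panic() calls this twice: first with console_flush == false to print
 * the selected reports, then with console_flush == true, after pending
 * console output has been flushed, to optionally replay the whole
 * kernel log (PANIC_PRINT_ALL_PRINTK_MSG).
 */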
static void panic_print_sys_info(bool console_flush)
{
	if (console_flush) {
		if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
			console_flush_on_panic(CONSOLE_REPLAY_ALL);
		return;
	}

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}

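/*
 * Decide whether a warning should escalate to a panic: either because
 * panic_on_warn is set, or because the number of warnings has reached
 * the kernel.warn_limit sysctl (0, the default, means no limit).
 */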
void check_panic_on_warn(const char *origin)
{
	unsigned int limit;

	if (panic_on_warn)
		panic("%s: panic_on_warn set ...\n", origin);

	limit = READ_ONCE(warn_limit);
	if (atomic_inc_return(&warn_count) >= limit && limit)
		panic("%s: system warned too often (kernel.warn_limit is %d)",
		      origin, limit);
}

/*
 * Helper that triggers the NMI backtrace (if set in panic_print)
 * and then performs the secondary CPUs shutdown - we cannot have
 * the NMI backtrace after the CPUs are off!
 */
static void panic_other_cpus_shutdown(bool crash_kexec)
{
	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
		trigger_all_cpu_backtrace();

	/*
	 * Note that smp_send_stop() is the usual SMP shutdown function,
	 * which unfortunately may not be hardened to work in a panic
	 * situation. If we want to do crash dump after notifier calls
	 * and kmsg_dump, we will need architecture dependent extra
	 * bits in addition to stopping other CPUs, hence we rely on
	 * crash_smp_send_stop() for that.
	 */
	if (!crash_kexec)
		smp_send_stop();
	else
		crash_smp_send_stop();
}

/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
	}

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	panic_print_sys_info(false);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run the
	 * panic_notifiers and dump kmsg before kdump.
	 * Note: since some panic_notifiers can make the crashed kernel
	 * more unstable, this can also increase the risk of kdump failure.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Lock debugging
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info(true);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
	[ TAINT_TEST ]			= { 'N', ' ', true },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
					t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);
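
/*
 * Example (illustrative only): a driver that has to work around broken
 * firmware could record that fact without disabling lock debugging:
 *
 *	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 *
 * which sets the 'I' flag in the mask reported by print_tainted().
 */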

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
bool oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}

static void print_oops_end_marker(void)
{
	pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

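/*
 * Carries the printf-style format string and argument list from
 * warn_slowpath_fmt() down to __warn().
 */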
struct warn_args {
	const char *fmt;
	va_list args;
};

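/*
 * Common backend for the WARN() family: prints the warning banner and
 * optional message, dumps modules, registers and a backtrace, checks
 * panic_on_warn/warn_limit via check_panic_on_warn(), and applies the
 * requested taint without disabling lockdep.
 */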
void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();

	if (regs)
		show_regs(regs);

	check_panic_on_warn("kernel");

	if (!regs)
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();
	trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifdef CONFIG_BUG
#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	struct warn_args args;

	pr_warn(CUT_HERE);

	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
	bool rcu = warn_rcu_enter();
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
	warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(__warn_printk);
#endif

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

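/*
 * Writing to the debugfs file created below re-arms all WARN_ONCE() /
 * WARN_ON_ONCE() and similar *_once() sites, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/clear_warn_once
 */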
DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	instrumentation_begin();
	panic("stack-protector: Kernel stack is corrupted in: %pB",
		__builtin_return_address(0));
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

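/*
 * Example (illustrative only): booting with
 *
 *	panic_on_taint=0x20,nousertaint
 *
 * panics the kernel as soon as TAINT_BAD_PAGE (bit 5) gets set, while the
 * optional "nousertaint" suffix keeps taints forced from userspace via
 * /proc/sys/kernel/tainted from triggering that panic.
 */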
static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%s\n",
		panic_on_taint, str_enabled_disabled(panic_on_taint_nousertaint));

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);