Commit eed1fc87 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/pmladek/printk

Pull printk updates from Petr Mladek:

 - Store printk() messages directly into the main log buffer even in NMI
   context when logbuf_lock is available. This is a best-effort attempt to
   get even large chunks of text out; it is handy, for example, when all
   ftrace messages are printed during a system panic in NMI (a small model
   of this routing follows the commit metadata below).

 - Add missing __printf() annotations to internal printk functions to calm
   down compiler format-string warnings
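
   As a quick illustration of what these annotations buy: in the kernel,
   __printf(a, b) expands to __attribute__((format(printf, a, b))), which
   tells the compiler that argument a is a printf-style format string and
   argument b is the first variadic argument (0 meaning "va_list variant,
   no variadic arguments to check here"). The stand-alone user-space sketch
   below only illustrates the attribute; my_log() and my_vlog() are made-up
   names, not kernel functions:

       #include <stdarg.h>
       #include <stdio.h>

       /* Same shape as the kernel's __printf() helper macro. */
       #define __printf(a, b) __attribute__((format(printf, a, b)))

       /* va_list variant, like vprintk_deferred(): index 0 means there are
        * no variadic arguments to check here; callers are checked through
        * my_log() instead. */
       static __printf(1, 0) int my_vlog(const char *fmt, va_list args)
       {
               return vprintf(fmt, args);
       }

       static __printf(1, 2) int my_log(const char *fmt, ...)
       {
               va_list args;
               int r;

               va_start(args, fmt);
               r = my_vlog(fmt, args);
               va_end(args);

               return r;
       }

       int main(void)
       {
               my_log("cpu %d\n", 0);
               /* my_log("cpu %d\n", "oops");  <-- would now trigger -Wformat */
               return 0;
       }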

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/pmladek/printk:
  printk: add __printf attributes to internal functions
  printk: Use the main logbuf in NMI when logbuf_lock is available
parents 19964541 a5707eef
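
To make the first bullet concrete, here is a small stand-alone C model of the
new routing decision (an analogy only, not kernel code: the PRINTK_* flag
values mirror kernel/printk/internal.h after this merge, but nmi_enter(),
route() and the two variables are simplified stand-ins for the per-CPU
printk_context word, logbuf_lock and the real printk_nmi_enter() and
vprintk_func() shown in the diff below):

    #include <stdbool.h>
    #include <stdio.h>

    /* Flag layout mirrors kernel/printk/internal.h after this merge. */
    #define PRINTK_SAFE_CONTEXT_MASK         0x3fffffff
    #define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
    #define PRINTK_NMI_CONTEXT_MASK          0x80000000

    /* Simplified stand-ins for the per-CPU context word and for
     * raw_spin_is_locked(&logbuf_lock). */
    static unsigned int printk_context;
    static bool logbuf_locked;

    /* Model of printk_nmi_enter(): pick the path once, on NMI entry. */
    static void nmi_enter(void)
    {
            if ((printk_context & PRINTK_SAFE_CONTEXT_MASK) && logbuf_locked)
                    /* This CPU already holds logbuf_lock; taking it again
                     * would deadlock, so fall back to the small per-CPU
                     * buffer. */
                    printk_context |= PRINTK_NMI_CONTEXT_MASK;
            else
                    /* The main log buffer is usable; only console output
                     * has to be deferred out of NMI. */
                    printk_context |= PRINTK_NMI_DEFERRED_CONTEXT_MASK;
    }

    /* Model of vprintk_func(): route one message according to the flags. */
    static const char *route(void)
    {
            if (printk_context & PRINTK_NMI_CONTEXT_MASK)
                    return "vprintk_nmi: small per-CPU buffer, flushed later";
            if (printk_context & PRINTK_SAFE_CONTEXT_MASK)
                    return "vprintk_safe: per-CPU buffer, avoids recursion";
            if (printk_context & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
                    return "vprintk_deferred: main log buffer, consoles deferred";
            return "vprintk_default: main log buffer and consoles";
    }

    int main(void)
    {
            nmi_enter();              /* lock not held: use the main buffer */
            printf("%s\n", route());

            printk_context = 1;       /* this CPU is in printk-safe context... */
            logbuf_locked = true;     /* ...and holds logbuf_lock */
            nmi_enter();
            printf("%s\n", route());

            return 0;
    }

Running the model prints the deferred (main log buffer) path first and the
per-CPU NMI buffer path second, which is exactly the distinction the first
bullet describes.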
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -18,12 +18,14 @@
 
 #ifdef CONFIG_PRINTK
 
-#define PRINTK_SAFE_CONTEXT_MASK         0x7fffffff
+#define PRINTK_SAFE_CONTEXT_MASK         0x3fffffff
+#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
 #define PRINTK_NMI_CONTEXT_MASK          0x80000000
 
 extern raw_spinlock_t logbuf_lock;
 
 __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
+__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
 void __printk_safe_enter(void);
 void __printk_safe_exit(void);
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2720,16 +2720,13 @@ void wake_up_klogd(void)
         preempt_enable();
 }
 
-int printk_deferred(const char *fmt, ...)
+int vprintk_deferred(const char *fmt, va_list args)
 {
-        va_list args;
         int r;
 
-        preempt_disable();
-        va_start(args, fmt);
         r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
-        va_end(args);
 
+        preempt_disable();
         __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
         irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
         preempt_enable();
@@ -2737,6 +2734,18 @@ int printk_deferred(const char *fmt, ...)
         return r;
 }
 
+int printk_deferred(const char *fmt, ...)
+{
+        va_list args;
+        int r;
+
+        va_start(args, fmt);
+        r = vprintk_deferred(fmt, args);
+        va_end(args);
+
+        return r;
+}
+
 /*
  * printk rate limiting, lifted from the networking subsystem.
  *
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -80,7 +80,7 @@ static void queue_flush_work(struct printk_safe_seq_buf *s)
  * happen, printk_safe_log_store() will notice the buffer->len mismatch
  * and repeat the write.
  */
-static int printk_safe_log_store(struct printk_safe_seq_buf *s,
+static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
                                                 const char *fmt, va_list args)
 {
         int add;
@@ -299,7 +299,7 @@ void printk_safe_flush_on_panic(void)
  * one writer running. But the buffer might get flushed from another
  * CPU, so we need to be careful.
  */
-static int vprintk_nmi(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 {
         struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
@@ -308,17 +308,29 @@ static int vprintk_nmi(const char *fmt, va_list args)
 
 void printk_nmi_enter(void)
 {
-        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+        /*
+         * The size of the extra per-CPU buffer is limited. Use it only when
+         * the main one is locked. If this CPU is not in the safe context,
+         * the lock must be taken on another CPU and we could wait for it.
+         */
+        if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
+            raw_spin_is_locked(&logbuf_lock)) {
+                this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+        } else {
+                this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
+        }
 }
 
 void printk_nmi_exit(void)
 {
-        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+        this_cpu_and(printk_context,
+                     ~(PRINTK_NMI_CONTEXT_MASK |
+                       PRINTK_NMI_DEFERRED_CONTEXT_MASK));
 }
 
 #else
 
-static int vprintk_nmi(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 {
         return 0;
 }
@@ -330,7 +342,7 @@ static int vprintk_nmi(const char *fmt, va_list args)
  * into itself. It uses a per-CPU buffer to store the message, just like
  * NMI.
  */
-static int vprintk_safe(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
 {
         struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
@@ -351,12 +363,22 @@ void __printk_safe_exit(void)
 
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
+        /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
         if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
                 return vprintk_nmi(fmt, args);
 
+        /* Use extra buffer to prevent a recursion deadlock in safe mode. */
         if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
                 return vprintk_safe(fmt, args);
 
+        /*
+         * Use the main logbuf when logbuf_lock is available in NMI.
+         * But avoid calling console drivers that might have their own locks.
+         */
+        if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
+                return vprintk_deferred(fmt, args);
+
+        /* No obstacles. */
         return vprintk_default(fmt, args);
 }
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 
 bool nmi_cpu_backtrace(struct pt_regs *regs)
 {
+        static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
         int cpu = smp_processor_id();
 
         if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+                arch_spin_lock(&lock);
                 if (regs && cpu_in_idle(instruction_pointer(regs))) {
                         pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
                                 cpu, instruction_pointer(regs));
@@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
                         else
                                 dump_stack();
                 }
+                arch_spin_unlock(&lock);
                 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                 return true;
         }