Commit faebd693 authored by John Ogness, committed by Petr Mladek

printk: rename cpulock functions

Since the printk cpulock is CPU-reentrant and since it is used
in all contexts, its usage must be carefully considered and
most likely will require programming locklessly. To avoid
mistaking the printk cpulock for a typical lock, rename it to
cpu_sync. The main functions then become:

    printk_cpu_sync_get_irqsave(flags);
    printk_cpu_sync_put_irqrestore(flags);

Add extra notes of caution in the function description to help
developers understand the requirements for correct usage.
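For illustration only (not part of this patch), a typical call site under the new names keeps the interrupt state on the caller's stack and does only lockless, reentrant-safe work inside the section:

	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	/* Only lockless / reentrant-safe accesses belong in here. */
	pr_info("begin multi-line dump\n");
	dump_stack();
	pr_info("end multi-line dump\n");
	printk_cpu_sync_put_irqrestore(flags);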
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
Link: https://lore.kernel.org/r/20220421212250.565456-2-john.ogness@linutronix.de
parent 3ef4ea3d
@@ -277,43 +277,55 @@ static inline void printk_trigger_flush(void)
 #endif
 
 #ifdef CONFIG_SMP
-extern int __printk_cpu_trylock(void);
-extern void __printk_wait_on_cpu_lock(void);
-extern void __printk_cpu_unlock(void);
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
 
 /**
- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
+ * printk_cpu_sync_get_irqsave() - Acquire the printk cpu-reentrant spinning
  *	lock and disable interrupts.
  * @flags: Stack-allocated storage for saving local interrupt state,
- *	to be passed to printk_cpu_unlock_irqrestore().
+ *	to be passed to printk_cpu_sync_put_irqrestore().
  *
  * If the lock is owned by another CPU, spin until it becomes available.
  * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ *     * This function is reentrant on the same CPU. Therefore the calling
+ *       code must not assume exclusive access to data if code accessing the
+ *       data can run reentrant or within NMI context on the same CPU.
+ *
+ *     * If there exists usage of this function from NMI context, it becomes
+ *       unsafe to perform any type of locking or spinning to wait for other
+ *       CPUs after calling this function from any context. This includes
+ *       using spinlocks or any other busy-waiting synchronization methods.
  */
-#define printk_cpu_lock_irqsave(flags)			\
+#define printk_cpu_sync_get_irqsave(flags)		\
 	for (;;) {					\
 		local_irq_save(flags);			\
-		if (__printk_cpu_trylock())		\
+		if (__printk_cpu_sync_try_get())	\
 			break;				\
 		local_irq_restore(flags);		\
-		__printk_wait_on_cpu_lock();		\
+		__printk_cpu_sync_wait();		\
 	}
 
 /**
- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
  *	lock and restore interrupts.
- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
  */
-#define printk_cpu_unlock_irqrestore(flags)	\
+#define printk_cpu_sync_put_irqrestore(flags)	\
 	do {					\
-		__printk_cpu_unlock();		\
+		__printk_cpu_sync_put();	\
 		local_irq_restore(flags);	\
-	} while (0)				\
+	} while (0)
 
 #else
 
-#define printk_cpu_lock_irqsave(flags) ((void)flags)
-#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
+#define printk_cpu_sync_get_irqsave(flags) ((void)flags)
+#define printk_cpu_sync_put_irqrestore(flags) ((void)flags)
 
 #endif /* CONFIG_SMP */
...
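The CAUTION block above is the point of the rename: the section is reentrant on the same CPU (for example when an NMI fires while it is held), so data touched inside it still needs lockless handling. A minimal hypothetical sketch of a well-behaved caller (example_dump() and dump_count are illustrative names, not kernel code):

static atomic_t dump_count = ATOMIC_INIT(0);	/* hypothetical counter */

static void example_dump(void)
{
	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	/*
	 * An NMI on this CPU may re-enter this section, so update shared
	 * state atomically and never spin waiting for another CPU here.
	 */
	atomic_inc(&dump_count);
	pr_info("dump #%d on CPU %d\n",
		atomic_read(&dump_count), smp_processor_id());
	printk_cpu_sync_put_irqrestore(flags);
}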
@@ -3667,26 +3667,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 #endif
 
 #ifdef CONFIG_SMP
-static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
-static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
 
 /**
- * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
  *	spinning lock is not owned by any CPU.
  *
  * Context: Any context.
  */
-void __printk_wait_on_cpu_lock(void)
+void __printk_cpu_sync_wait(void)
 {
 	do {
 		cpu_relax();
-	} while (atomic_read(&printk_cpulock_owner) != -1);
+	} while (atomic_read(&printk_cpu_sync_owner) != -1);
 }
-EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+EXPORT_SYMBOL(__printk_cpu_sync_wait);
 
 /**
- * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
  *	spinning lock.
  *
  * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
@@ -3695,7 +3695,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
  * Context: Any context. Expects interrupts to be disabled.
  * Return: 1 on success, otherwise 0.
  */
-int __printk_cpu_trylock(void)
+int __printk_cpu_sync_try_get(void)
 {
 	int cpu;
 	int old;
@@ -3705,79 +3705,80 @@ int __printk_cpu_trylock(void)
 	/*
 	 * Guarantee loads and stores from this CPU when it is the lock owner
 	 * are _not_ visible to the previous lock owner. This pairs with
-	 * __printk_cpu_unlock:B.
+	 * __printk_cpu_sync_put:B.
 	 *
 	 * Memory barrier involvement:
 	 *
-	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
-	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
+	 * then __printk_cpu_sync_put:A can never read from
+	 * __printk_cpu_sync_try_get:B.
 	 *
 	 * Relies on:
 	 *
-	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
 	 * of the previous CPU
 	 *	matching
-	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
-	 * of this CPU
+	 * ACQUIRE from __printk_cpu_sync_try_get:A to
+	 * __printk_cpu_sync_try_get:B of this CPU
 	 */
-	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
-				     cpu); /* LMM(__printk_cpu_trylock:A) */
+	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
+				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
 	if (old == -1) {
 		/*
 		 * This CPU is now the owner and begins loading/storing
-		 * data: LMM(__printk_cpu_trylock:B)
+		 * data: LMM(__printk_cpu_sync_try_get:B)
 		 */
 		return 1;
 	} else if (old == cpu) {
 		/* This CPU is already the owner. */
-		atomic_inc(&printk_cpulock_nested);
+		atomic_inc(&printk_cpu_sync_nested);
 		return 1;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL(__printk_cpu_trylock);
+EXPORT_SYMBOL(__printk_cpu_sync_try_get);
 
 /**
- * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
  *
  * The calling processor must be the owner of the lock.
  *
  * Context: Any context. Expects interrupts to be disabled.
  */
-void __printk_cpu_unlock(void)
+void __printk_cpu_sync_put(void)
 {
-	if (atomic_read(&printk_cpulock_nested)) {
-		atomic_dec(&printk_cpulock_nested);
+	if (atomic_read(&printk_cpu_sync_nested)) {
+		atomic_dec(&printk_cpu_sync_nested);
 		return;
 	}
 
 	/*
 	 * This CPU is finished loading/storing data:
-	 * LMM(__printk_cpu_unlock:A)
+	 * LMM(__printk_cpu_sync_put:A)
 	 */
 
 	/*
 	 * Guarantee loads and stores from this CPU when it was the
 	 * lock owner are visible to the next lock owner. This pairs
-	 * with __printk_cpu_trylock:A.
+	 * with __printk_cpu_sync_try_get:A.
 	 *
 	 * Memory barrier involvement:
 	 *
-	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
-	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
+	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
 	 *
 	 * Relies on:
 	 *
-	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
 	 * of this CPU
 	 *	matching
-	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
-	 * of the next CPU
+	 * ACQUIRE from __printk_cpu_sync_try_get:A to
+	 * __printk_cpu_sync_try_get:B of the next CPU
 	 */
-	atomic_set_release(&printk_cpulock_owner,
-			   -1); /* LMM(__printk_cpu_unlock:B) */
+	atomic_set_release(&printk_cpu_sync_owner,
+			   -1); /* LMM(__printk_cpu_sync_put:B) */
 }
-EXPORT_SYMBOL(__printk_cpu_unlock);
+EXPORT_SYMBOL(__printk_cpu_sync_put);
 
 #endif /* CONFIG_SMP */
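To see why atomic_cmpxchg_acquire() and atomic_set_release() pair up, the owner/nesting scheme above can be reduced to a standalone user-space sketch in C11 atomics (illustrative only; the kernel implementation is the diff above):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int owner = -1;	/* plays the role of printk_cpu_sync_owner */
static atomic_int nested = 0;	/* plays the role of printk_cpu_sync_nested */

static bool sync_try_get(int ctx)
{
	int old = -1;

	/* ACQUIRE: pairs with the RELEASE in sync_put() by the previous owner. */
	if (atomic_compare_exchange_strong_explicit(&owner, &old, ctx,
						    memory_order_acquire,
						    memory_order_relaxed))
		return true;		/* this context is now the owner */
	if (old == ctx) {		/* re-entry by the current owner */
		atomic_fetch_add_explicit(&nested, 1, memory_order_relaxed);
		return true;
	}
	return false;			/* owned by someone else; caller spins and retries */
}

static void sync_put(void)
{
	if (atomic_load_explicit(&nested, memory_order_relaxed)) {
		atomic_fetch_sub_explicit(&nested, 1, memory_order_relaxed);
		return;			/* leaving a nested section; still the owner */
	}
	/* RELEASE: makes this owner's stores visible to the next owner. */
	atomic_store_explicit(&owner, -1, memory_order_release);
}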
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-	printk_cpu_lock_irqsave(flags);
+	printk_cpu_sync_get_irqsave(flags);
 	__dump_stack(log_lvl);
-	printk_cpu_unlock_irqrestore(flags);
+	printk_cpu_sync_put_irqrestore(flags);
 }
 EXPORT_SYMBOL(dump_stack_lvl);
...
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		 * Allow nested NMI backtraces while serializing
 		 * against other CPUs.
 		 */
-		printk_cpu_lock_irqsave(flags);
+		printk_cpu_sync_get_irqsave(flags);
 		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
 				cpu, (void *)instruction_pointer(regs));
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
-		printk_cpu_unlock_irqrestore(flags);
+		printk_cpu_sync_put_irqrestore(flags);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
...