Kirill Smelkov / linux · Commits

commit 94f2be50
Author:  Petr Mladek
Date:    Jun 29, 2021
Parents: d8c03214 3342aa8e

    Merge branch 'printk-rework' into for-linus

Showing 3 changed files with 159 additions and 36 deletions:
include/linux/printk.h    +41   -0
kernel/printk/printk.c    +116  -0
lib/dump_stack.c          +2    -36
include/linux/printk.h

@@ -282,6 +282,47 @@ static inline void printk_safe_flush_on_panic(void)
 }
 #endif
 
+#ifdef CONFIG_SMP
+extern int __printk_cpu_trylock(void);
+extern void __printk_wait_on_cpu_lock(void);
+extern void __printk_cpu_unlock(void);
+
+/**
+ * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
+ *                             lock and disable interrupts.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ *         to be passed to printk_cpu_unlock_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ */
+#define printk_cpu_lock_irqsave(flags)		\
+	for (;;) {				\
+		local_irq_save(flags);		\
+		if (__printk_cpu_trylock())	\
+			break;			\
+		local_irq_restore(flags);	\
+		__printk_wait_on_cpu_lock();	\
+	}
+
+/**
+ * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
+ *                                  lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
+ */
+#define printk_cpu_unlock_irqrestore(flags)	\
+	do {					\
+		__printk_cpu_unlock();		\
+		local_irq_restore(flags);	\
+	} while (0)				\
+
+#else
+
+#define printk_cpu_lock_irqsave(flags) ((void)flags)
+#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
+
+#endif /* CONFIG_SMP */
+
 extern int kptr_restrict;
 
 /**
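These two macros are the whole public interface: acquisition spins with interrupts briefly re-enabled between attempts, and a nested acquisition on the same CPU succeeds immediately. As a usage sketch (the caller and emit_report() are hypothetical stand-ins for any sequence of printk() calls whose output must stay contiguous):

/* Hypothetical caller of the new interface; emit_report() stands in
 * for printk() output that must not interleave with other CPUs.
 */
static void emit_report_synchronized(void)
{
	unsigned long flags;

	printk_cpu_lock_irqsave(flags);	/* spins; reentrant on this CPU */
	emit_report();
	printk_cpu_unlock_irqrestore(flags);
}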
kernel/printk/printk.c

@@ -3531,3 +3531,119 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
 
 #endif
+
+#ifdef CONFIG_SMP
+static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
+
+/**
+ * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+ *                               spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+void __printk_wait_on_cpu_lock(void)
+{
+	do {
+		cpu_relax();
+	} while (atomic_read(&printk_cpulock_owner) != -1);
+}
+EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
+
+/**
+ * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+ *                          spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+ * lock, this function succeeds immediately.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+int __printk_cpu_trylock(void)
+{
+	int cpu;
+	int old;
+
+	cpu = smp_processor_id();
+
+	/*
+	 * Guarantee loads and stores from this CPU when it is the lock owner
+	 * are _not_ visible to the previous lock owner. This pairs with
+	 * __printk_cpu_unlock:B.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+	 * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of the previous CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of this CPU
+	 */
+	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+				     cpu); /* LMM(__printk_cpu_trylock:A) */
+	if (old == -1) {
+		/*
+		 * This CPU is now the owner and begins loading/storing
+		 * data: LMM(__printk_cpu_trylock:B)
+		 */
+		return 1;
+
+	} else if (old == cpu) {
+		/* This CPU is already the owner. */
+		atomic_inc(&printk_cpulock_nested);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(__printk_cpu_trylock);
+
+/**
+ * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+void __printk_cpu_unlock(void)
+{
+	if (atomic_read(&printk_cpulock_nested)) {
+		atomic_dec(&printk_cpulock_nested);
+		return;
+	}
+
+	/*
+	 * This CPU is finished loading/storing data:
+	 * LMM(__printk_cpu_unlock:A)
+	 */
+
+	/*
+	 * Guarantee loads and stores from this CPU when it was the
+	 * lock owner are visible to the next lock owner. This pairs
+	 * with __printk_cpu_trylock:A.
+	 *
+	 * Memory barrier involvement:
+	 *
+	 * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+	 * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
+	 *
+	 * Relies on:
+	 *
+	 * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
+	 * of this CPU
+	 *    matching
+	 * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+	 * of the next CPU
+	 */
+	atomic_set_release(&printk_cpulock_owner,
+			   -1); /* LMM(__printk_cpu_unlock:B) */
+}
+EXPORT_SYMBOL(__printk_cpu_unlock);
+#endif /* CONFIG_SMP */
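The ownership protocol and the ACQUIRE/RELEASE pairing spelled out in the comments above can be exercised outside the kernel. Below is a minimal user-space analogue, assuming C11 atomics and pthreads: thread IDs play the role of CPU numbers, atomic_compare_exchange_strong_explicit() with memory_order_acquire stands in for atomic_cmpxchg_acquire(), and atomic_store_explicit() with memory_order_release stands in for atomic_set_release(). All names here (cpu_trylock, cpu_unlock, worker) are invented for the sketch.

/* Build with: cc -std=c11 -pthread cpulock_demo.c */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int owner = -1;	/* analogue of printk_cpulock_owner */
static atomic_int nested = 0;	/* analogue of printk_cpulock_nested */

static int cpu_trylock(int self)
{
	int expected = -1;

	/* ACQUIRE pairs with the RELEASE in cpu_unlock(): the new
	 * owner sees every store made by the previous owner. */
	if (atomic_compare_exchange_strong_explicit(&owner, &expected, self,
			memory_order_acquire, memory_order_relaxed))
		return 1;
	if (expected == self) {	/* already the owner: just nest */
		atomic_fetch_add_explicit(&nested, 1, memory_order_relaxed);
		return 1;
	}
	return 0;
}

static void cpu_unlock(void)
{
	if (atomic_load_explicit(&nested, memory_order_relaxed)) {
		atomic_fetch_sub_explicit(&nested, 1, memory_order_relaxed);
		return;
	}
	/* RELEASE: publish this owner's stores to the next owner. */
	atomic_store_explicit(&owner, -1, memory_order_release);
}

static void *worker(void *arg)
{
	int self = (int)(long)arg;

	while (!cpu_trylock(self))
		sched_yield();	/* analogue of __printk_wait_on_cpu_lock() */
	cpu_trylock(self);	/* nested acquisition succeeds at once */
	printf("thread %d holds the lock, nesting level 1\n", self);
	cpu_unlock();		/* drops the nesting level */
	cpu_unlock();		/* actually releases the lock */
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Unlike the kernel version, nothing here disables interrupts; the sketch only demonstrates the owner/nesting state machine and the barrier pairing.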
lib/dump_stack.c

@@ -84,50 +84,16 @@ static void __dump_stack(void)
  *
  * Architectures can override this implementation by implementing its own.
  */
-#ifdef CONFIG_SMP
-static atomic_t dump_lock = ATOMIC_INIT(-1);
-
 asmlinkage __visible void dump_stack(void)
 {
 	unsigned long flags;
-	int was_locked;
-	int old;
-	int cpu;
 
 	/*
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-retry:
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-	old = atomic_cmpxchg(&dump_lock, -1, cpu);
-	if (old == -1) {
-		was_locked = 0;
-	} else if (old == cpu) {
-		was_locked = 1;
-	} else {
-		local_irq_restore(flags);
-		/*
-		 * Wait for the lock to release before jumping to
-		 * atomic_cmpxchg() in order to mitigate the thundering herd
-		 * problem.
-		 */
-		do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
-		goto retry;
-	}
-
+	printk_cpu_lock_irqsave(flags);
 	__dump_stack();
-
-	if (!was_locked)
-		atomic_set(&dump_lock, -1);
-
-	local_irq_restore(flags);
+	printk_cpu_unlock_irqrestore(flags);
 }
-#else
-asmlinkage __visible void dump_stack(void)
-{
-	__dump_stack();
-}
-#endif
 EXPORT_SYMBOL(dump_stack);
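The was_locked bookkeeping that this change deletes existed for one reason: the same CPU can re-enter dump_stack() from an interrupt or NMI taken while the lock is already held, and a plain spinlock would self-deadlock there. The cpu-reentrant printk lock preserves that property. A sketch of such a re-entrant caller, with a hypothetical handler (the IRQ wiring is invented for illustration):

#include <linux/interrupt.h>
#include <linux/kernel.h>

/* Hypothetical handler: if this interrupt fires on a CPU that is
 * already inside dump_stack(), the nested call below succeeds
 * immediately instead of spinning on a lock the CPU already owns,
 * and both backtraces come out unscrambled.
 */
static irqreturn_t debug_dump_irq(int irq, void *dev_id)
{
	dump_stack();
	return IRQ_HANDLED;
}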