Commit f9ad4a5f authored by Peter Zijlstra

lockdep: Remove lockdep_hardirq{s_enabled,_context}() argument

Now that the macros use per-cpu data, we no longer need the argument.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200623083721.571835311@infradead.org
parent a21ee605
...@@ -758,7 +758,7 @@ noinstr void idtentry_exit_user(struct pt_regs *regs) ...@@ -758,7 +758,7 @@ noinstr void idtentry_exit_user(struct pt_regs *regs)
noinstr bool idtentry_enter_nmi(struct pt_regs *regs) noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
{ {
bool irq_state = lockdep_hardirqs_enabled(current); bool irq_state = lockdep_hardirqs_enabled();
__nmi_enter(); __nmi_enter();
lockdep_hardirqs_off(CALLER_ADDR0); lockdep_hardirqs_off(CALLER_ADDR0);
......
...@@ -40,9 +40,9 @@ DECLARE_PER_CPU(int, hardirq_context); ...@@ -40,9 +40,9 @@ DECLARE_PER_CPU(int, hardirq_context);
extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void); extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void); extern void trace_hardirqs_off(void);
# define lockdep_hardirq_context(p) (this_cpu_read(hardirq_context)) # define lockdep_hardirq_context() (this_cpu_read(hardirq_context))
# define lockdep_softirq_context(p) ((p)->softirq_context) # define lockdep_softirq_context(p) ((p)->softirq_context)
# define lockdep_hardirqs_enabled(p) (this_cpu_read(hardirqs_enabled)) # define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
# define lockdep_hardirq_enter() \ # define lockdep_hardirq_enter() \
do { \ do { \
...@@ -109,9 +109,9 @@ do { \ ...@@ -109,9 +109,9 @@ do { \
# define trace_hardirqs_off_finish() do { } while (0) # define trace_hardirqs_off_finish() do { } while (0)
# define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0) # define trace_hardirqs_off() do { } while (0)
# define lockdep_hardirq_context(p) 0 # define lockdep_hardirq_context() 0
# define lockdep_softirq_context(p) 0 # define lockdep_softirq_context(p) 0
# define lockdep_hardirqs_enabled(p) 0 # define lockdep_hardirqs_enabled() 0
# define lockdep_softirqs_enabled(p) 0 # define lockdep_softirqs_enabled(p) 0
# define lockdep_hardirq_enter() do { } while (0) # define lockdep_hardirq_enter() do { } while (0)
# define lockdep_hardirq_threaded() do { } while (0) # define lockdep_hardirq_threaded() do { } while (0)
......
...@@ -562,7 +562,7 @@ do { \ ...@@ -562,7 +562,7 @@ do { \
# define lockdep_assert_RT_in_threaded_ctx() do { \ # define lockdep_assert_RT_in_threaded_ctx() do { \
WARN_ONCE(debug_locks && !current->lockdep_recursion && \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
lockdep_hardirq_context(current) && \ lockdep_hardirq_context() && \
!(current->hardirq_threaded || current->irq_config), \ !(current->hardirq_threaded || current->irq_config), \
"Not in threaded context on PREEMPT_RT as expected\n"); \ "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0) } while (0)
......
...@@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr, ...@@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr,
pr_warn("-----------------------------------------------------\n"); pr_warn("-----------------------------------------------------\n");
pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr), curr->comm, task_pid_nr(curr),
lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
lockdep_hardirqs_enabled(curr), lockdep_hardirqs_enabled(),
curr->softirqs_enabled); curr->softirqs_enabled);
print_lock(next); print_lock(next);
...@@ -3331,9 +3331,9 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, ...@@ -3331,9 +3331,9 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
curr->comm, task_pid_nr(curr), curr->comm, task_pid_nr(curr),
lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
lockdep_hardirqs_enabled(curr), lockdep_hardirqs_enabled(),
lockdep_softirqs_enabled(curr)); lockdep_softirqs_enabled(curr));
print_lock(this); print_lock(this);
...@@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) ...@@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
return; return;
if (unlikely(lockdep_hardirqs_enabled(current))) { if (unlikely(lockdep_hardirqs_enabled())) {
/* /*
* Neither irq nor preemption are disabled here * Neither irq nor preemption are disabled here
* so this is racy by nature but losing one hit * so this is racy by nature but losing one hit
...@@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) ...@@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
* Can't allow enabling interrupts while in an interrupt handler, * Can't allow enabling interrupts while in an interrupt handler,
* that's general bad form and such. Recursion, limited stack etc.. * that's general bad form and such. Recursion, limited stack etc..
*/ */
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current))) if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
return; return;
current->hardirq_chain_key = current->curr_chain_key; current->hardirq_chain_key = current->curr_chain_key;
...@@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) ...@@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
return; return;
if (lockdep_hardirqs_enabled(curr)) { if (lockdep_hardirqs_enabled()) {
/* /*
* Neither irq nor preemption are disabled here * Neither irq nor preemption are disabled here
* so this is racy by nature but losing one hit * so this is racy by nature but losing one hit
...@@ -3783,7 +3783,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) ...@@ -3783,7 +3783,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return; return;
if (lockdep_hardirqs_enabled(curr)) { if (lockdep_hardirqs_enabled()) {
/* /*
* We have done an ON -> OFF transition: * We have done an ON -> OFF transition:
*/ */
...@@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip) ...@@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip)
* usage bit for all held locks, if hardirqs are * usage bit for all held locks, if hardirqs are
* enabled too: * enabled too:
*/ */
if (lockdep_hardirqs_enabled(curr)) if (lockdep_hardirqs_enabled())
mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
lockdep_recursion_finish(); lockdep_recursion_finish();
} }
...@@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) ...@@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
*/ */
if (!hlock->trylock) { if (!hlock->trylock) {
if (hlock->read) { if (hlock->read) {
if (lockdep_hardirq_context(curr)) if (lockdep_hardirq_context())
if (!mark_lock(curr, hlock, if (!mark_lock(curr, hlock,
LOCK_USED_IN_HARDIRQ_READ)) LOCK_USED_IN_HARDIRQ_READ))
return 0; return 0;
...@@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) ...@@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
LOCK_USED_IN_SOFTIRQ_READ)) LOCK_USED_IN_SOFTIRQ_READ))
return 0; return 0;
} else { } else {
if (lockdep_hardirq_context(curr)) if (lockdep_hardirq_context())
if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
return 0; return 0;
if (curr->softirq_context) if (curr->softirq_context)
...@@ -3928,7 +3928,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) ...@@ -3928,7 +3928,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
static inline unsigned int task_irq_context(struct task_struct *task) static inline unsigned int task_irq_context(struct task_struct *task)
{ {
return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) + return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
} }
...@@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr) ...@@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr)
* Set appropriate wait type for the context; for IRQs we have to take * Set appropriate wait type for the context; for IRQs we have to take
* into account force_irqthread as that is implied by PREEMPT_RT. * into account force_irqthread as that is implied by PREEMPT_RT.
*/ */
if (lockdep_hardirq_context(curr)) { if (lockdep_hardirq_context()) {
/* /*
* Check if force_irqthreads will run us threaded. * Check if force_irqthreads will run us threaded.
*/ */
...@@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags) ...@@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags)
return; return;
if (irqs_disabled_flags(flags)) { if (irqs_disabled_flags(flags)) {
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) { if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
printk("possible reason: unannotated irqs-off.\n"); printk("possible reason: unannotated irqs-off.\n");
} }
} else { } else {
if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) { if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
printk("possible reason: unannotated irqs-on.\n"); printk("possible reason: unannotated irqs-on.\n");
} }
} }
......
...@@ -230,7 +230,7 @@ static inline bool lockdep_softirq_start(void) ...@@ -230,7 +230,7 @@ static inline bool lockdep_softirq_start(void)
{ {
bool in_hardirq = false; bool in_hardirq = false;
if (lockdep_hardirq_context(current)) { if (lockdep_hardirq_context()) {
in_hardirq = true; in_hardirq = true;
lockdep_hardirq_exit(); lockdep_hardirq_exit();
} }
......
...@@ -2,9 +2,9 @@ ...@@ -2,9 +2,9 @@
#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ #ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ #define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
# define lockdep_hardirq_context(p) 0 # define lockdep_hardirq_context() 0
# define lockdep_softirq_context(p) 0 # define lockdep_softirq_context(p) 0
# define lockdep_hardirqs_enabled(p) 0 # define lockdep_hardirqs_enabled() 0
# define lockdep_softirqs_enabled(p) 0 # define lockdep_softirqs_enabled(p) 0
# define lockdep_hardirq_enter() do { } while (0) # define lockdep_hardirq_enter() do { } while (0)
# define lockdep_hardirq_exit() do { } while (0) # define lockdep_hardirq_exit() do { } while (0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment