Commit 4f3e7524 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

lockdep: map_acquire

Most of the free-standing lock_acquire() usages look remarkably similar; sweep
them into a new helper.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f82b217e
...@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks) ...@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
goto out; goto out;
} }
lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&handle->h_lockdep_map);
out: out:
return handle; return handle;
...@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle) ...@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
spin_unlock(&journal->j_state_lock); spin_unlock(&journal->j_state_lock);
} }
lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); map_release(&handle->h_lockdep_map);
jbd_free_handle(handle); jbd_free_handle(handle);
return err; return err;
......
...@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) ...@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
goto out; goto out;
} }
lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&handle->h_lockdep_map);
out: out:
return handle; return handle;
} }
...@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle) ...@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
spin_unlock(&journal->j_state_lock); spin_unlock(&journal->j_state_lock);
} }
lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); map_release(&handle->h_lockdep_map);
jbd2_free_handle(handle); jbd2_free_handle(handle);
return err; return err;
......
...@@ -459,4 +459,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) ...@@ -459,4 +459,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define rwsem_release(l, n, i) do { } while (0) # define rwsem_release(l, n, i) do { } while (0)
#endif #endif
/*
 * map_acquire()/map_release(): helpers for annotating pseudo-lock
 * acquire/release events on a bare struct lockdep_map (e.g. journal
 * handles, workqueues), replacing the repeated free-standing
 * lock_acquire()/lock_release() call sites swept up by this commit.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
/*
 * Fifth lock_acquire() argument is 2 under PROVE_LOCKING, 1 otherwise —
 * presumably the "check" level (full chain validation vs. basic checks);
 * NOTE(review): confirm against the lock_acquire() prototype.
 */
# define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, _THIS_IP_)
# else
# define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, _THIS_IP_)
# endif
# define map_release(l) lock_release(l, 1, _THIS_IP_)
#else
/* Lock debugging disabled: both helpers compile away to nothing. */
# define map_acquire(l) do { } while (0)
# define map_release(l) do { } while (0)
#endif
#endif /* __LINUX_LOCKDEP_H */ #endif /* __LINUX_LOCKDEP_H */
...@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) ...@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
BUG_ON(get_wq_data(work) != cwq); BUG_ON(get_wq_data(work) != cwq);
work_clear_pending(work); work_clear_pending(work);
lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&cwq->wq->lockdep_map);
lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&lockdep_map);
f(work); f(work);
lock_release(&lockdep_map, 1, _THIS_IP_); map_release(&lockdep_map);
lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); map_release(&cwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
...@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq) ...@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
int cpu; int cpu;
might_sleep(); might_sleep();
lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&wq->lockdep_map);
lock_release(&wq->lockdep_map, 1, _THIS_IP_); map_release(&wq->lockdep_map);
for_each_cpu_mask_nr(cpu, *cpu_map) for_each_cpu_mask_nr(cpu, *cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
} }
...@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work) ...@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
if (!cwq) if (!cwq)
return 0; return 0;
lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&cwq->wq->lockdep_map);
lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); map_release(&cwq->wq->lockdep_map);
prev = NULL; prev = NULL;
spin_lock_irq(&cwq->lock); spin_lock_irq(&cwq->lock);
...@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work) ...@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
might_sleep(); might_sleep();
lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&work->lockdep_map);
lock_release(&work->lockdep_map, 1, _THIS_IP_); map_release(&work->lockdep_map);
cwq = get_wq_data(work); cwq = get_wq_data(work);
if (!cwq) if (!cwq)
...@@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) ...@@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
if (cwq->thread == NULL) if (cwq->thread == NULL)
return; return;
lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); map_acquire(&cwq->wq->lockdep_map);
lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); map_release(&cwq->wq->lockdep_map);
flush_cpu_workqueue(cwq); flush_cpu_workqueue(cwq);
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment