Commit 23a0ee90 authored by Ingo Molnar

Merge branch 'core/locking' into core/urgent

parents cc7a486c 0f2bc27b
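
This merge pulls the pending lockdep work into core/urgent. As the hunks below show, lock_acquire() gains a nest_lock argument, struct held_lock is packed into bitfields around a new class_idx, the open-coded lock_acquire()/lock_release() annotations in jbd, jbd2 and the workqueue code are replaced by the new lock_map_acquire()/lock_map_release() helpers, spin_lock_nest_lock() and lock_set_subclass() are introduced, and the scheduler's runqueue locking plus mm_take_all_locks() pick up the new annotations.

For orientation, here is a minimal sketch (not code from this commit; the my_ctx names are hypothetical) of how a subsystem annotates a pseudo-lock, such as a journal handle or a workqueue flush, with the new helpers:

#include <linux/lockdep.h>

/* A pseudo-lock: not a real lock, but a dependency lockdep should track. */
struct my_ctx {
	struct lockdep_map dep_map;
};

static struct lock_class_key my_ctx_key;

static void my_ctx_init(struct my_ctx *ctx)
{
	lockdep_init_map(&ctx->dep_map, "my_ctx", &my_ctx_key, 0);
}

static void my_ctx_enter(struct my_ctx *ctx)
{
	/* was: lock_acquire(&ctx->dep_map, 0, 0, 0, 2, _THIS_IP_); */
	lock_map_acquire(&ctx->dep_map);
}

static void my_ctx_exit(struct my_ctx *ctx)
{
	/* was: lock_release(&ctx->dep_map, 1, _THIS_IP_); */
	lock_map_release(&ctx->dep_map);
}
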
@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);
 out:
 	return handle;
@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);
 	jbd_free_handle(handle);
 	return err;
......
@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);
 out:
 	return handle;
 }
@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);
 	jbd2_free_handle(handle);
 	return err;
......
@@ -89,6 +89,7 @@ struct lock_class {
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
+	unsigned int			dep_gen_id;
 
 	/*
 	 * IRQ/softirq usage tracking bits:
@@ -189,6 +190,14 @@ struct lock_chain {
 	u64				chain_key;
 };
 
+#define MAX_LOCKDEP_KEYS_BITS		13
+/*
+ * Subtract one because we offset hlock->class_idx by 1 in order
+ * to make 0 mean no class. This avoids overflowing the class_idx
+ * bitfield and hitting the BUG in hlock_class().
+ */
+#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+
 struct held_lock {
 	/*
 	 * One-way hash of the dependency chain up to this point. We
@@ -205,14 +214,14 @@ struct held_lock {
 	 * with zero), here we store the previous hash value:
 	 */
 	u64				prev_chain_key;
-	struct lock_class		*class;
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
+	struct lockdep_map		*nest_lock;
 #ifdef CONFIG_LOCK_STAT
 	u64				waittime_stamp;
 	u64				holdtime_stamp;
 #endif
+	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'
@@ -226,11 +235,11 @@ struct held_lock {
 	 * The following field is used to detect when we cross into an
	 * interrupt context:
 	 */
-	int				irq_context;
-	int				trylock;
-	int				read;
-	int				check;
-	int				hardirqs_off;
+	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+	unsigned int trylock:1;
+	unsigned int read:2;        /* see lock_acquire() comment */
+	unsigned int check:2;       /* see lock_acquire() comment */
+	unsigned int hardirqs_off:1;
 };
 
 /*
@@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  * 2: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			 int trylock, int read, int check, unsigned long ip);
+			 int trylock, int read, int check,
+			 struct lockdep_map *nest_lock, unsigned long ip);
 
 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
 
+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+			      unsigned long ip);
+
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -313,8 +326,9 @@ static inline void lockdep_on(void)
 {
 }
 
-# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
@@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
 # else
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define spin_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
 # else
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
 # endif
 # define rwlock_release(l, n, i)		lock_release(l, n, i)
 #else
@@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define mutex_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)			lock_release(l, n, i)
 #else
@@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define rwsem_release(l, n, i)			do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+# else
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+# endif
+# define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
+#else
+# define lock_map_acquire(l)		do { } while (0)
+# define lock_map_release(l)		do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
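
The class_idx comment above refers to the lookup helper inside lockdep proper (whose diff is collapsed below). A rough sketch of that lookup, under the assumption that held locks index the global lock_classes[] array, offset by one so that 0 can mean "no class":

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	/* class_idx == 0 means "no class"; real classes are stored as idx + 1 */
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);	/* the BUG the comment refers to */
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}
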
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire()	\
-		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
 #else
 # define rcu_read_acquire()	do { } while (0)
......
@@ -183,8 +183,14 @@ do { \
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
+	_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
+} while (0)
 #else
 # define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
 #endif
 
 #define write_lock(lock)	_write_lock(lock)
......
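
The spin_lock_nest_lock() annotation added above is what mm_take_all_locks() uses at the end of this diff: it tells lockdep that taking arbitrarily many locks of one class is safe because every path doing so first holds the same outer lock. A minimal sketch of that pattern, with hypothetical bucket/table names (the outer "nest" lock only needs a dep_map member, e.g. an rw_semaphore):

#include <linux/rwsem.h>
#include <linux/spinlock.h>

struct bucket {
	spinlock_t lock;
};

struct table {
	struct rw_semaphore big_sem;	/* serialises whole-table locking */
	struct bucket buckets[16];
};

static void table_lock_all(struct table *t)
{
	int i;

	down_write(&t->big_sem);
	for (i = 0; i < 16; i++)
		/* many instances of one class, vouched for by big_sem */
		spin_lock_nest_lock(&t->buckets[i].lock, &t->big_sem);
}

static void table_unlock_all(struct table *t)
{
	int i;

	for (i = 0; i < 16; i++)
		spin_unlock(&t->buckets[i].lock);
	up_write(&t->big_sem);
}
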
@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
 void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+							__acquires(lock);
 void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
 void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
......
(One file's diff is collapsed in this view and is not shown.)
@@ -17,9 +17,6 @@
  */
 #define MAX_LOCKDEP_ENTRIES	8192UL
 
-#define MAX_LOCKDEP_KEYS_BITS	11
-#define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)
-
 #define MAX_LOCKDEP_CHAINS_BITS	14
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
@@ -53,6 +50,9 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * Various lockdep statistics:
......
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
 {
 }
 
-static unsigned long count_forward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += count_forward_deps(entry->class);
-
-	return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += count_backward_deps(entry->class);
-
-	return ret;
-}
-
 static void print_name(struct seq_file *m, struct lock_class *class)
 {
 	char str[128];
@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-	nr_forward_deps = count_forward_deps(class);
+	nr_forward_deps = lockdep_count_forward_deps(class);
 	seq_printf(m, " FD:%5ld", nr_forward_deps);
 
-	nr_backward_deps = count_backward_deps(class);
+	nr_backward_deps = lockdep_count_backward_deps(class);
 	seq_printf(m, " BD:%5ld", nr_backward_deps);
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -229,6 +201,9 @@ static int lc_show(struct seq_file *m, void *v)
 	for (i = 0; i < chain->depth; i++) {
 		class = lock_chain_get_class(chain, i);
+		if (!class->key)
+			continue;
+
 		seq_printf(m, "[%p] ", class->key);
 		print_name(m, class);
 		seq_puts(m, "\n");
@@ -350,7 +325,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 			nr_hardirq_read_unsafe++;
 
-		sum_forward_deps += count_forward_deps(class);
+		sum_forward_deps += lockdep_count_forward_deps(class);
 	}
 #ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
......
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3637,7 +3643,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -8000,7 +8006,6 @@ void __init sched_init(void)
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
......
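
The sched.c hunks above pair double_lock_balance() with the new double_unlock_balance(): the former may drop and re-take this_rq->lock (returning 1 in that case) and takes the second runqueue lock with SINGLE_DEPTH_NESTING, while the latter drops busiest->lock and uses lock_set_subclass() to mark this_rq->lock as subclass 0 again. A sketch of how a caller is expected to use the pair (the balance_one() wrapper is hypothetical):

static void balance_one(struct rq *this_rq, struct rq *busiest)
{
	/* this_rq->lock is already held by the caller */
	if (double_lock_balance(this_rq, busiest)) {
		/*
		 * this_rq->lock was dropped and re-acquired to respect
		 * lock ordering; re-validate any state derived from it.
		 */
	}

	/* ... migrate tasks while both runqueue locks are held ... */

	double_unlock_balance(this_rq, busiest);	/* this_rq->lock stays held */
}
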
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;
......
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_spin_lock_nested);
+
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 {
 	unsigned long flags;
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+				     struct lockdep_map *nest_lock)
+{
+	preempt_disable();
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
 #endif
 
 void __lockfunc _spin_unlock(spinlock_t *lock)
......
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
-		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_map_acquire(&cwq->wq->lockdep_map);
+		lock_map_acquire(&lockdep_map);
 		f(work);
-		lock_release(&lockdep_map, 1, _THIS_IP_);
-		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+		lock_map_release(&lockdep_map);
+		lock_map_release(&cwq->wq->lockdep_map);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
-	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
 	if (!cwq)
 		return 0;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	prev = NULL;
 	spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
-	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
 	cwq = get_wq_data(work);
 	if (!cwq)
@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	if (cwq->thread == NULL)
 		return;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	flush_cpu_workqueue(cwq);
 	/*
......
@@ -8,6 +8,7 @@
  *
  * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
+#include <linux/kernel.h>
 #include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
@@ -37,6 +38,7 @@ int debug_locks_off(void)
 {
 	if (xchg(&debug_locks, 0)) {
 		if (!debug_locks_silent) {
+			oops_in_progress = 1;
 			console_verbose();
 			return 1;
 		}
......
@@ -2273,14 +2273,14 @@ int install_special_mapping(struct mm_struct *mm,
 
 static DEFINE_MUTEX(mm_all_locks_mutex);
 
-static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
 	if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
 		/*
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock(&anon_vma->lock);
+		spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->lock. If some other vma in this mm shares
@@ -2296,7 +2296,7 @@ static void vm_lock_anon_vma(struct anon_vma *anon_vma)
 	}
 }
 
-static void vm_lock_mapping(struct address_space *mapping)
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 {
 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
 		/*
@@ -2310,7 +2310,7 @@ static void vm_lock_mapping(struct address_space *mapping)
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		spin_lock(&mapping->i_mmap_lock);
+		spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
 	}
 }
@@ -2358,11 +2358,17 @@ int mm_take_all_locks(struct mm_struct *mm)
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (signal_pending(current))
 			goto out_unlock;
-		if (vma->anon_vma)
-			vm_lock_anon_vma(vma->anon_vma);
 		if (vma->vm_file && vma->vm_file->f_mapping)
-			vm_lock_mapping(vma->vm_file->f_mapping);
+			vm_lock_mapping(mm, vma->vm_file->f_mapping);
+	}
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (signal_pending(current))
+			goto out_unlock;
+		if (vma->anon_vma)
+			vm_lock_anon_vma(mm, vma->anon_vma);
 	}
 
 	ret = 0;
 out_unlock:
......