Commit 4ea2176d authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] lockdep: prove rwsem locking correctness

Use the lock validator framework to prove rwsem locking correctness.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a8f24a39
@@ -40,6 +40,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/lockdep.h>

 struct rwsem_waiter;

@@ -61,21 +62,34 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 };

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-}
+__RWSEM_DEP_MAP_INIT(name) }

 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)						\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__init_rwsem((sem), #sem, &__key);			\
+} while (0)

 /*
  * lock for reading
@@ -128,7 +142,7 @@ LOCK_PREFIX "  cmpxchgl  %2,%0\n\t"
 /*
  * lock for writing
  */
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	int tmp;
@@ -152,6 +166,11 @@ LOCK_PREFIX "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
 		: "memory", "cc");
 }

+static inline void __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
......
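The reworked init_rwsem() is worth a closer look: because the macro declares a static struct lock_class_key at every call site, all rwsems initialized from that site, including dynamically allocated ones, are mapped onto one lockdep class without storing any class data in the object itself. A minimal sketch of a caller, assuming a hypothetical struct my_dev and my_dev_alloc() that are not part of this patch:

	#include <linux/rwsem.h>
	#include <linux/slab.h>

	struct my_dev {
		struct rw_semaphore cfg_sem;	/* protects cfg */
		int cfg;
	};

	static struct my_dev *my_dev_alloc(void)
	{
		struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (!dev)
			return NULL;
		/*
		 * Expands to __init_rwsem(&dev->cfg_sem, "&dev->cfg_sem", &__key),
		 * where __key is a static lock_class_key private to this call
		 * site: every my_dev instance shares one lock class named after
		 * the initialization expression.
		 */
		init_rwsem(&dev->cfg_sem);
		return dev;
	}

Statically declared semaphores keep using DECLARE_RWSEM(), where __RWSEM_DEP_MAP_INIT() names the class after the variable instead.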
@@ -61,6 +61,9 @@ struct rw_semaphore {
 	signed long		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 };

 #ifndef __s390x__
@@ -80,8 +83,16 @@ struct rw_semaphore {
 /*
  * initialisation
  */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
+{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }

 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

@@ -93,6 +104,17 @@ static inline void init_rwsem(struct rw_semaphore *sem)
 	INIT_LIST_HEAD(&sem->wait_list);
 }

+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)						\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__init_rwsem((sem), #sem, &__key);			\
+} while (0)
+
 /*
  * lock for reading
  */
@@ -155,7 +177,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	signed long old, new, tmp;
@@ -181,6 +203,11 @@ static inline void __down_write(struct rw_semaphore *sem)
 	rwsem_down_write_failed(sem);
 }

+static inline void __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
......
@@ -37,7 +37,8 @@ struct semaphore {
 static inline void sema_init (struct semaphore *sem, int val)
 {
-	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val);
+	atomic_set(&sem->count, val);
+	init_waitqueue_head(&sem->wait);
 }

 static inline void init_MUTEX (struct semaphore *sem)
......
@@ -32,18 +32,37 @@ struct rw_semaphore {
 	__s32			activity;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 };

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
+{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

-extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)						\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__init_rwsem((sem), #sem, &__key);			\
+} while (0)

 extern void FASTCALL(__down_read(struct rw_semaphore *sem));
 extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
 extern void FASTCALL(__down_write(struct rw_semaphore *sem));
+extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
 extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
 extern void FASTCALL(__up_read(struct rw_semaphore *sem));
 extern void FASTCALL(__up_write(struct rw_semaphore *sem));
......
@@ -27,64 +27,55 @@ struct rw_semaphore;
 /*
  * lock for reading
  */
-static inline void down_read(struct rw_semaphore *sem)
-{
-	might_sleep();
-	__down_read(sem);
-}
+extern void down_read(struct rw_semaphore *sem);

 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-static inline int down_read_trylock(struct rw_semaphore *sem)
-{
-	int ret;
-	ret = __down_read_trylock(sem);
-	return ret;
-}
+extern int down_read_trylock(struct rw_semaphore *sem);

 /*
  * lock for writing
  */
-static inline void down_write(struct rw_semaphore *sem)
-{
-	might_sleep();
-	__down_write(sem);
-}
+extern void down_write(struct rw_semaphore *sem);

 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-static inline int down_write_trylock(struct rw_semaphore *sem)
-{
-	int ret;
-	ret = __down_write_trylock(sem);
-	return ret;
-}
+extern int down_write_trylock(struct rw_semaphore *sem);

 /*
  * release a read lock
  */
-static inline void up_read(struct rw_semaphore *sem)
-{
-	__up_read(sem);
-}
+extern void up_read(struct rw_semaphore *sem);

 /*
  * release a write lock
  */
-static inline void up_write(struct rw_semaphore *sem)
-{
-	__up_write(sem);
-}
+extern void up_write(struct rw_semaphore *sem);

 /*
  * downgrade write lock to read lock
  */
-static inline void downgrade_write(struct rw_semaphore *sem)
-{
-	__downgrade_write(sem);
-}
+extern void downgrade_write(struct rw_semaphore *sem);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * nested locking:
+ */
+extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+/*
+ * Take/release a lock when not the owner will release it:
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
+#else
+# define down_read_nested(sem, subclass)	down_read(sem)
+# define down_write_nested(sem, subclass)	down_write(sem)
+# define down_read_non_owner(sem)		down_read(sem)
+# define up_read_non_owner(sem)			up_read(sem)
+#endif

 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
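The new *_nested() primitives exist so that code which legitimately takes two rwsems of the same lock class (for instance a parent and a child object initialized at the same call site, always locked in the same order) can tell the validator that the inner acquisition uses a distinct subclass instead of looking like a self-deadlock. A hedged sketch, with struct my_node and my_node_update_pair() invented for illustration:

	#include <linux/rwsem.h>

	struct my_node {
		struct rw_semaphore sem;
		struct my_node *parent;
		int data;
	};

	static void my_node_update_pair(struct my_node *child, int val)
	{
		down_write(&child->parent->sem);	/* outer lock, subclass 0 */
		down_write_nested(&child->sem, 1);	/* inner lock, subclass 1 */

		child->data = val;

		up_write(&child->sem);
		up_write(&child->parent->sem);
	}

The subclass only affects lockdep bookkeeping; with CONFIG_DEBUG_LOCK_ALLOC disabled, down_write_nested() falls back to plain down_write() via the #else branch above.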
@@ -8,7 +8,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o
+	    hrtimer.o rwsem.o

 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
......
@@ -103,3 +103,45 @@ void downgrade_write(struct rw_semaphore *sem)
 }

 EXPORT_SYMBOL(downgrade_write);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void down_read_nested(struct rw_semaphore *sem, int subclass)
+{
+	might_sleep();
+	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
+
+	__down_read(sem);
+}
+
+EXPORT_SYMBOL(down_read_nested);
+
+void down_read_non_owner(struct rw_semaphore *sem)
+{
+	might_sleep();
+
+	__down_read(sem);
+}
+
+EXPORT_SYMBOL(down_read_non_owner);
+
+void down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
+
+	__down_write_nested(sem, subclass);
+}
+
+EXPORT_SYMBOL(down_write_nested);
+
+void up_read_non_owner(struct rw_semaphore *sem)
+{
+	__up_read(sem);
+}
+
+EXPORT_SYMBOL(up_read_non_owner);
+
+#endif
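The *_non_owner() variants above cover the one case lockdep's owner tracking otherwise forbids: a read lock taken in one context and released from another, so the task doing the up_read() is not the one that acquired it. A sketch under that assumption (resource_sem, submit_ctx() and complete_ctx() are hypothetical, not part of this patch):

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(resource_sem);

	static void submit_ctx(void)
	{
		/* pin the resource for the duration of an async operation */
		down_read_non_owner(&resource_sem);
		/* ... queue the async work; this context returns ... */
	}

	static void complete_ctx(void)
	{
		/* runs later, possibly from a different task */
		up_read_non_owner(&resource_sem);
	}

Note that down_read_non_owner() still sleeps and takes the semaphore; it only skips the dep_map acquire/release annotations that assume owner semantics.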
@@ -20,8 +20,16 @@ struct rwsem_waiter {
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
@@ -183,7 +191,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
@@ -223,6 +231,11 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	;
 }

+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
@@ -292,9 +305,10 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 }

-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
......
@@ -8,6 +8,26 @@
 #include <linux/init.h>
 #include <linux/module.h>

+/*
+ * Initialize an rwsem:
+ */
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+}
+
+EXPORT_SYMBOL(__init_rwsem);
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;
......
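With dep_map embedded in struct rw_semaphore and every acquisition annotated, the validator can report rwsem ordering bugs at the first acquisition that would complete a dependency cycle, without the deadlock ever having to trigger at runtime. A deliberately broken sketch (sem_a, sem_b and the two paths are hypothetical, not taken from this patch):

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(sem_a);
	static DECLARE_RWSEM(sem_b);

	static void path_one(void)
	{
		down_write(&sem_a);
		down_read(&sem_b);	/* lockdep records sem_a -> sem_b */
		up_read(&sem_b);
		up_write(&sem_a);
	}

	static void path_two(void)
	{
		down_write(&sem_b);
		down_read(&sem_a);	/* sem_b -> sem_a: lockdep warns here */
		up_read(&sem_a);
		up_write(&sem_b);
	}

If both paths ever run in the same kernel, lockdep emits a possible-circular-locking-dependency report naming the two lock classes, which is the sense in which the changelog's "prove rwsem locking correctness" is meant.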