Commit 31552385 authored by Thomas Gleixner, committed by Ingo Molnar

locking/spinlock/rt: Prepare for RT local_lock

Add the static and runtime initializer mechanics to support the RT variant
of local_lock, which requires the lock type in the lockdep map to be set
to LD_LOCK_PERCPU.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211305.967526724@linutronix.de
parent 992caf7f
@@ -8,20 +8,28 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				struct lock_class_key *key);
+				struct lock_class_key *key, bool percpu);
 #else
 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				       struct lock_class_key *key)
+				       struct lock_class_key *key, bool percpu)
 {
 }
 #endif
 
 #define spin_lock_init(slock)					\
 do {								\
 	static struct lock_class_key __key;			\
 								\
 	rt_mutex_base_init(&(slock)->lock);			\
-	__rt_spin_lock_init(slock, #slock, &__key);		\
+	__rt_spin_lock_init(slock, #slock, &__key, false);	\
+} while (0)
+
+#define local_spin_lock_init(slock)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, true);	\
 } while (0)
 
 extern void rt_spin_lock(spinlock_t *lock);
...
@@ -60,6 +60,12 @@ typedef struct spinlock {
 		SPIN_DEP_MAP_INIT(name)				\
 	}
 
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name)			\
+	{							\
+		.lock = __RT_MUTEX_BASE_INITIALIZER(name.lock),	\
+		LOCAL_SPIN_DEP_MAP_INIT(name)			\
+	}
+
 #define DEFINE_SPINLOCK(name)					\
 	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
...
@@ -37,9 +37,17 @@ typedef struct raw_spinlock {
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
 	}
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)		\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_CONFIG,	\
+		.lock_type = LD_LOCK_PERCPU,		\
+	}
 #else
 # define RAW_SPIN_DEP_MAP_INIT(lockname)
 # define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
...
@@ -120,10 +120,13 @@ EXPORT_SYMBOL(rt_spin_trylock_bh);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-			 struct lock_class_key *key)
+			 struct lock_class_key *key, bool percpu)
 {
+	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
+
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
+			      LD_WAIT_INV, type);
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);
 #endif
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment