Commit ec484608 authored by Thomas Gleixner, committed by Ingo Molnar

locking, kprobes: Annotate the hash locks and kretprobe.lock as raw

The kprobe locks can be taken in atomic context and therefore
must not be converted to sleeping locks on -rt - annotate them as raw.

In mainline this change documents the low-level nature of
these locks - otherwise there is no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 76bf6877
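For readers unfamiliar with the distinction: on PREEMPT_RT a plain spinlock_t is substituted by a sleeping lock, which must not be taken in the atomic contexts kprobe and kretprobe handlers run in, whereas raw_spinlock_t always remains a true spinning lock. A minimal sketch of the conversion pattern this patch applies, using hypothetical names (my_obj, my_obj_hit) purely for illustration - it is not part of the patch itself:

#include <linux/spinlock.h>

/* Hypothetical example, not from this commit. */
struct my_obj {
        raw_spinlock_t lock;    /* was spinlock_t; raw so it never sleeps on -rt */
        int hits;
};

static void my_obj_init(struct my_obj *obj)
{
        raw_spin_lock_init(&obj->lock);
        obj->hits = 0;
}

/* May be called from atomic context (e.g. a probe handler). */
static void my_obj_hit(struct my_obj *obj)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&obj->lock, flags);
        obj->hits++;
        raw_spin_unlock_irqrestore(&obj->lock, flags);
}

The conversion is mechanical: the lock declaration, the initializer, and every lock/unlock call switch to their raw_* counterparts, exactly as the diff below does for kretprobe.lock and the kretprobe hash-table locks.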
@@ -181,7 +181,7 @@ struct kretprobe {
 	int nmissed;
 	size_t data_size;
 	struct hlist_head free_instances;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 struct kretprobe_instance {
...
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
 	return &(kretprobe_table_locks[hash].lock);
 }
@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 	hlist_del(&ri->hlist);
 	INIT_HLIST_NODE(&ri->hlist);
 	if (likely(rp)) {
-		spin_lock(&rp->lock);
+		raw_spin_lock(&rp->lock);
 		hlist_add_head(&ri->hlist, &rp->free_instances);
-		spin_unlock(&rp->lock);
+		raw_spin_unlock(&rp->lock);
 	} else
 		/* Unregistering */
 		hlist_add_head(&ri->hlist, head);
@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 __acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	*head = &kretprobe_inst_table[hash];
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
 	unsigned long *flags)
 __acquires(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 __releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
 	unsigned long *flags)
 __releases(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 	/*TODO: consider to only swap the RA after the last pre_handler fired */
 	hash = hash_ptr(current, KPROBE_HASH_BITS);
-	spin_lock_irqsave(&rp->lock, flags);
+	raw_spin_lock_irqsave(&rp->lock, flags);
 	if (!hlist_empty(&rp->free_instances)) {
 		ri = hlist_entry(rp->free_instances.first,
 				struct kretprobe_instance, hlist);
 		hlist_del(&ri->hlist);
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 
 		ri->rp = rp;
 		ri->task = current;
@@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		kretprobe_table_unlock(hash, &flags);
 	} else {
 		rp->nmissed++;
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 	}
 	return 0;
 }
@@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 		rp->maxactive = num_possible_cpus();
 #endif
 	}
-	spin_lock_init(&rp->lock);
+	raw_spin_lock_init(&rp->lock);
 	INIT_HLIST_HEAD(&rp->free_instances);
 	for (i = 0; i < rp->maxactive; i++) {
 		inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@ static int __init init_kprobes(void)
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-		spin_lock_init(&(kretprobe_table_locks[i].lock));
+		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
 	}
 
 	/*
...
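The __acquires()/__releases() lines kept in kretprobe_hash_lock() and kretprobe_hash_unlock() above are Sparse context annotations; they are independent of the lock type, which is why the changelog can note that Sparse checking keeps working after the raw_spinlock_t switch. A minimal sketch of that annotation pattern, with a hypothetical example_lock used only for illustration:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock for illustration */

/* Tells Sparse the function returns with example_lock held. */
static void example_lock_irqsave(unsigned long *flags)
	__acquires(example_lock)
{
	raw_spin_lock_irqsave(&example_lock, *flags);
}

/* Tells Sparse the function is entered with example_lock held and drops it. */
static void example_unlock_irqrestore(unsigned long *flags)
	__releases(example_lock)
{
	raw_spin_unlock_irqrestore(&example_lock, *flags);
}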