Commit 25764288 authored by Masami Hiramatsu's avatar Masami Hiramatsu Committed by Steven Rostedt

kprobes: Move locks into appropriate functions

Break a big critical region into fine-grained pieces on the
kprobe-registering path. This helps us solve a circular
locking dependency when introducing ftrace-based kprobes.

Link: http://lkml.kernel.org/r/20120605102826.27845.81689.stgit@localhost.localdomain

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent f7fa6ef0
...@@ -759,20 +759,28 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) ...@@ -759,20 +759,28 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
struct kprobe *ap; struct kprobe *ap;
struct optimized_kprobe *op; struct optimized_kprobe *op;
/* For preparing optimization, jump_label_text_reserved() is called */
jump_label_lock();
mutex_lock(&text_mutex);
ap = alloc_aggr_kprobe(p); ap = alloc_aggr_kprobe(p);
if (!ap) if (!ap)
return; goto out;
op = container_of(ap, struct optimized_kprobe, kp); op = container_of(ap, struct optimized_kprobe, kp);
if (!arch_prepared_optinsn(&op->optinsn)) { if (!arch_prepared_optinsn(&op->optinsn)) {
/* If failed to setup optimizing, fallback to kprobe */ /* If failed to setup optimizing, fallback to kprobe */
arch_remove_optimized_kprobe(op); arch_remove_optimized_kprobe(op);
kfree(op); kfree(op);
return; goto out;
} }
init_aggr_kprobe(ap, p); init_aggr_kprobe(ap, p);
optimize_kprobe(ap); optimize_kprobe(ap); /* This just kicks optimizer thread */
out:
mutex_unlock(&text_mutex);
jump_label_unlock();
} }
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
...@@ -1144,12 +1152,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) ...@@ -1144,12 +1152,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
if (p->post_handler && !ap->post_handler) if (p->post_handler && !ap->post_handler)
ap->post_handler = aggr_post_handler; ap->post_handler = aggr_post_handler;
if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed)
/* Arm the breakpoint again. */
__arm_kprobe(ap);
}
return 0; return 0;
} }
...@@ -1189,11 +1191,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, ...@@ -1189,11 +1191,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
int ret = 0; int ret = 0;
struct kprobe *ap = orig_p; struct kprobe *ap = orig_p;
/* For preparing optimization, jump_label_text_reserved() is called */
jump_label_lock();
/*
* Get online CPUs to avoid text_mutex deadlock with stop machine,
* which is invoked by unoptimize_kprobe() in add_new_kprobe()
*/
get_online_cpus();
mutex_lock(&text_mutex);
if (!kprobe_aggrprobe(orig_p)) { if (!kprobe_aggrprobe(orig_p)) {
/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
ap = alloc_aggr_kprobe(orig_p); ap = alloc_aggr_kprobe(orig_p);
if (!ap) if (!ap) {
return -ENOMEM; ret = -ENOMEM;
goto out;
}
init_aggr_kprobe(ap, orig_p); init_aggr_kprobe(ap, orig_p);
} else if (kprobe_unused(ap)) } else if (kprobe_unused(ap))
/* This probe is going to die. Rescue it */ /* This probe is going to die. Rescue it */
...@@ -1213,7 +1226,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, ...@@ -1213,7 +1226,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
* free aggr_probe. It will be used next time, or * free aggr_probe. It will be used next time, or
* freed by unregister_kprobe. * freed by unregister_kprobe.
*/ */
return ret; goto out;
/* Prepare optimized instructions if possible. */ /* Prepare optimized instructions if possible. */
prepare_optimized_kprobe(ap); prepare_optimized_kprobe(ap);
...@@ -1228,7 +1241,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, ...@@ -1228,7 +1241,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
/* Copy ap's insn slot to p */ /* Copy ap's insn slot to p */
copy_kprobe(ap, p); copy_kprobe(ap, p);
return add_new_kprobe(ap, p); ret = add_new_kprobe(ap, p);
out:
mutex_unlock(&text_mutex);
put_online_cpus();
jump_label_unlock();
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed)
/* Arm the breakpoint again. */
arm_kprobe(ap);
}
return ret;
} }
static int __kprobes in_kprobes_functions(unsigned long addr) static int __kprobes in_kprobes_functions(unsigned long addr)
...@@ -1387,10 +1413,6 @@ int __kprobes register_kprobe(struct kprobe *p) ...@@ -1387,10 +1413,6 @@ int __kprobes register_kprobe(struct kprobe *p)
return ret; return ret;
mutex_lock(&kprobe_mutex); mutex_lock(&kprobe_mutex);
jump_label_lock(); /* needed to call jump_label_text_reserved() */
get_online_cpus(); /* For avoiding text_mutex deadlock. */
mutex_lock(&text_mutex);
old_p = get_kprobe(p->addr); old_p = get_kprobe(p->addr);
if (old_p) { if (old_p) {
...@@ -1399,7 +1421,9 @@ int __kprobes register_kprobe(struct kprobe *p) ...@@ -1399,7 +1421,9 @@ int __kprobes register_kprobe(struct kprobe *p)
goto out; goto out;
} }
mutex_lock(&text_mutex); /* Avoiding text modification */
ret = arch_prepare_kprobe(p); ret = arch_prepare_kprobe(p);
mutex_unlock(&text_mutex);
if (ret) if (ret)
goto out; goto out;
...@@ -1408,15 +1432,12 @@ int __kprobes register_kprobe(struct kprobe *p) ...@@ -1408,15 +1432,12 @@ int __kprobes register_kprobe(struct kprobe *p)
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
if (!kprobes_all_disarmed && !kprobe_disabled(p)) if (!kprobes_all_disarmed && !kprobe_disabled(p))
__arm_kprobe(p); arm_kprobe(p);
/* Try to optimize kprobe */ /* Try to optimize kprobe */
try_to_optimize_kprobe(p); try_to_optimize_kprobe(p);
out: out:
mutex_unlock(&text_mutex);
put_online_cpus();
jump_label_unlock();
mutex_unlock(&kprobe_mutex); mutex_unlock(&kprobe_mutex);
if (probed_mod) if (probed_mod)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment