Commit 778b032d authored by Oleg Nesterov, committed by Ingo Molnar

uprobes: Kill uprobes_srcu/uprobe_srcu_id

Kill the no longer needed uprobes_srcu/uprobe_srcu_id code.

It doesn't really work anyway. synchronize_srcu() can only
synchronize with the code "inside" the
srcu_read_lock/srcu_read_unlock section, while
uprobe_pre_sstep_notifier() does srcu_read_lock() _after_ we
already hit the breakpoint.

I guess this probably works "in practice". synchronize_srcu() is
slow and it implies synchronize_sched(), and the probed task
enters the non-preemptible section at the start of the exception
handler. Still, this is not right, at least in theory, and
task->uprobe_srcu_id bloats task_struct.
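
To make the ordering problem concrete, here is a minimal kernel-style
sketch (hypothetical demo_* names, not the kernel functions; it only
mirrors the code paths removed in the diff below):

#include <linux/srcu.h>

static struct srcu_struct demo_srcu;	/* init_srcu_struct(&demo_srcu) at boot */

/* Reader side, like uprobe_pre_sstep_notifier(): the SRCU read-side
 * critical section only begins here, i.e. AFTER the trap was taken. */
static int demo_breakpoint_hit(void)
{
	int idx;

	/* <-- window: the breakpoint already fired, but no SRCU lock is held yet */
	idx = srcu_read_lock(&demo_srcu);
	/* ... look up the uprobe, expecting it to still be in the rb_tree ... */
	srcu_read_unlock(&demo_srcu, idx);
	return 1;
}

/* Writer side, like the old delete_uprobe(): synchronize_srcu() only waits
 * for readers already inside srcu_read_lock()/srcu_read_unlock(), so a task
 * sitting in the window above is not waited for and can still miss the
 * uprobe after the erase - the wait buys nothing. */
static void demo_delete(void)
{
	synchronize_srcu(&demo_srcu);
	/* rb_erase(&uprobe->rb_node, &uprobes_tree); */
}
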
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anton Arapov <anton@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20120529193008.GG8057@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 56bb4cf6
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1569,7 +1569,6 @@ struct task_struct {
 #endif
 #ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
-	int uprobe_srcu_id;
 #endif
 };
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -38,7 +38,6 @@
 #define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
 #define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE
 
-static struct srcu_struct uprobes_srcu;
 static struct rb_root uprobes_tree = RB_ROOT;
 
 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
@@ -738,20 +737,14 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
 }
 
 /*
- * There could be threads that have hit the breakpoint and are entering the
- * notifier code and trying to acquire the uprobes_treelock. The thread
- * calling delete_uprobe() that is removing the uprobe from the rb_tree can
- * race with these threads and might acquire the uprobes_treelock compared
- * to some of the breakpoint hit threads. In such a case, the breakpoint
- * hit threads will not find the uprobe. The current unregistering thread
- * waits till all other threads have hit a breakpoint, to acquire the
- * uprobes_treelock before the uprobe is removed from the rbtree.
+ * There could be threads that have already hit the breakpoint. They
+ * will recheck the current insn and restart if find_uprobe() fails.
+ * See find_active_uprobe().
  */
 static void delete_uprobe(struct uprobe *uprobe)
 {
	unsigned long flags;
 
-	synchronize_srcu(&uprobes_srcu);
	spin_lock_irqsave(&uprobes_treelock, flags);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock_irqrestore(&uprobes_treelock, flags);
@@ -1388,9 +1381,6 @@ void uprobe_free_utask(struct task_struct *t)
 {
	struct uprobe_task *utask = t->utask;
 
-	if (t->uprobe_srcu_id != -1)
-		srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
-
	if (!utask)
		return;
@@ -1408,7 +1398,6 @@ void uprobe_free_utask(struct task_struct *t)
 void uprobe_copy_process(struct task_struct *t)
 {
	t->utask = NULL;
-	t->uprobe_srcu_id = -1;
 }
 
 /*
@@ -1513,9 +1502,6 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
	} else {
		*is_swbp = -EFAULT;
	}
-	srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
-	current->uprobe_srcu_id = -1;
	up_read(&mm->mmap_sem);
 
	return uprobe;
@@ -1656,7 +1642,6 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
	utask->state = UTASK_BP_HIT;
 
	set_thread_flag(TIF_UPROBE);
-	current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
 
	return 1;
 }
@@ -1691,7 +1676,6 @@ static int __init init_uprobes(void)
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}
-	init_srcu_struct(&uprobes_srcu);
 
	return register_die_notifier(&uprobe_exception_nb);
 }
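
For context on the new delete_uprobe() comment above: a thread that has
already taken the breakpoint re-looks up the uprobe under mmap_sem and,
if find_uprobe() fails because it raced with unregistration, simply
restarts the original instruction. A rough, hypothetical sketch of that
recheck-and-restart pattern (demo_* name; the real logic lives in
find_active_uprobe()/handle_swbp(), see the parent commit):

#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/uprobes.h>

/* Fragment in the style of kernel/events/uprobes.c; not the actual code. */
static void demo_handle_swbp(struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe *uprobe;
	int is_swbp;

	/* Re-lookup under mmap_sem instead of relying on SRCU. */
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* A breakpoint we do not own: forward the trap. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * find_uprobe() failed, most likely because we raced
			 * with uprobe_unregister(); the original instruction
			 * is back in place, so just restart it.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}
	/* ... otherwise set up single-stepping as usual ... */
}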