Commit f1a45d02 authored by Oleg Nesterov

uprobes: Kill dup_mmap()->uprobe_mmap(), simplify uprobe_mmap/munmap

1. Kill dup_mmap()->uprobe_mmap(), it was only needed to calculate
   new_mm->uprobes_state.count removed by the previous patch.

   If the forking process has a pending uprobe (int3) in vma, it will
   be copied by copy_page_range(), note that it checks vma->anon_vma
   so "Don't copy ptes" is not possible after install_breakpoint()
   which does anon_vma_prepare().

2. Remove is_swbp_at_addr() and "int count" in uprobe_mmap(). Again,
   this was needed for uprobes_state.count.

   As a side effect this fixes the bug pointed out by Srikar,
   this code lacked the necessary put_uprobe().

3. uprobe_munmap() becomes a nop after the previous patch. Remove the
   meaningless code but do not remove the helper, we will need it.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
parent 647c42df
...@@ -1010,7 +1010,7 @@ int uprobe_mmap(struct vm_area_struct *vma) ...@@ -1010,7 +1010,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
struct list_head tmp_list; struct list_head tmp_list;
struct uprobe *uprobe, *u; struct uprobe *uprobe, *u;
struct inode *inode; struct inode *inode;
int ret, count; int ret;
if (!atomic_read(&uprobe_events) || !valid_vma(vma, true)) if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
return 0; return 0;
...@@ -1023,8 +1023,6 @@ int uprobe_mmap(struct vm_area_struct *vma) ...@@ -1023,8 +1023,6 @@ int uprobe_mmap(struct vm_area_struct *vma)
build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
ret = 0; ret = 0;
count = 0;
list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
if (!ret) { if (!ret) {
unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
...@@ -1034,19 +1032,11 @@ int uprobe_mmap(struct vm_area_struct *vma) ...@@ -1034,19 +1032,11 @@ int uprobe_mmap(struct vm_area_struct *vma)
* We can race against uprobe_register(), see the * We can race against uprobe_register(), see the
* comment near uprobe_hash(). * comment near uprobe_hash().
*/ */
if (ret == -EEXIST) { if (ret == -EEXIST)
ret = 0; ret = 0;
if (!is_swbp_at_addr(vma->vm_mm, vaddr))
continue;
}
if (!ret)
count++;
} }
put_uprobe(uprobe); put_uprobe(uprobe);
} }
mutex_unlock(uprobes_mmap_hash(inode)); mutex_unlock(uprobes_mmap_hash(inode));
return ret; return ret;
...@@ -1057,27 +1047,13 @@ int uprobe_mmap(struct vm_area_struct *vma) ...@@ -1057,27 +1047,13 @@ int uprobe_mmap(struct vm_area_struct *vma)
*/ */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{ {
struct list_head tmp_list;
struct uprobe *uprobe, *u;
struct inode *inode;
if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
return; return;
if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
return; return;
inode = vma->vm_file->f_mapping->host; /* TODO: unmapping uprobe(s) will need more work */
if (!inode)
return;
mutex_lock(uprobes_mmap_hash(inode));
build_probe_list(inode, vma, start, end, &tmp_list);
list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
put_uprobe(uprobe);
}
mutex_unlock(uprobes_mmap_hash(inode));
} }
/* Slot allocation for XOL */ /* Slot allocation for XOL */
......
...@@ -454,9 +454,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) ...@@ -454,9 +454,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval) if (retval)
goto out; goto out;
if (file)
uprobe_mmap(tmp);
} }
/* a new mm has just been created */ /* a new mm has just been created */
arch_dup_mmap(oldmm, mm); arch_dup_mmap(oldmm, mm);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment