Commit fc148a5f authored by Rik van Riel, committed by Linus Torvalds

mm: remove VM_LOCK_RMAP code

When a VMA is in an inconsistent state during setup or teardown, the worst
that can happen is that the rmap code will not be able to find the page.

The mapping is in the process of being torn down (PTEs just got
invalidated by munmap), or set up (no PTEs have been instantiated yet).

It is also impossible for the rmap code to follow a pointer to an already
freed VMA, because the rmap code holds the anon_vma->lock, which the VMA
teardown code needs to take before the VMA is removed from the anon_vma
chain.

Hence, we should not need the VM_LOCK_RMAP locking at all.
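
The safety argument above rests entirely on lock ordering: the rmap walker and the unmap path serialize on the same anon_vma lock, and the VMA is freed only after it has been unlinked under that lock. Below is a minimal userspace C analogue of that pattern, with illustrative names only (a pthread mutex standing in for anon_vma->lock, list nodes standing in for anon_vma chain entries); it is a sketch of the ordering, not kernel code.

	/* A walker that holds the list lock can never touch a freed node,
	 * because the remover takes the same lock before unlinking and
	 * frees only once the node is unreachable. */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {                   /* stands in for an anon_vma chain entry */
	        struct node *next;
	        int payload;            /* stands in for the VMA it points at */
	};

	static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;       /* chain_lock plays the anon_vma->lock role */

	/* rmap side (cf. page_referenced/try_to_unmap walking the chain) */
	static int walk_chain(void)
	{
	        int sum = 0;

	        pthread_mutex_lock(&chain_lock);
	        for (struct node *n = head; n; n = n->next)
	                sum += n->payload;      /* safe: nothing is freed while we hold the lock */
	        pthread_mutex_unlock(&chain_lock);
	        return sum;
	}

	/* teardown side (cf. munmap unlinking the VMA from the anon_vma chain) */
	static void remove_head(void)
	{
	        struct node *victim;

	        pthread_mutex_lock(&chain_lock);
	        victim = head;
	        if (victim)
	                head = victim->next;    /* now unreachable to walkers */
	        pthread_mutex_unlock(&chain_lock);
	        free(victim);                   /* freed only after it is unlinked */
	}

	int main(void)
	{
	        for (int i = 1; i <= 2; i++) {  /* build a two-entry chain */
	                struct node *n = malloc(sizeof(*n));
	                n->payload = i;
	                n->next = head;
	                head = n;
	        }
	        printf("before removal: %d\n", walk_chain());
	        remove_head();
	        printf("after removal:  %d\n", walk_chain());
	        return 0;
	}

The worst a walker can observe is a chain that no longer contains the node, which mirrors the claim above: rmap may simply fail to find the page, and that is acceptable.
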
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c44b6743
include/linux/mm.h
@@ -97,11 +97,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
-#ifdef CONFIG_MMU
-#define VM_LOCK_RMAP	0x01000000	/* Do not follow this rmap (mmu mmap) */
-#else
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#endif
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
...
mm/mmap.c
@@ -554,9 +554,7 @@ again:	remove_next = 1 + (end > next->vm_end);
 	 */
 	if (importer && !importer->anon_vma) {
 		/* Block reverse map lookups until things are set up. */
-		importer->vm_flags |= VM_LOCK_RMAP;
 		if (anon_vma_clone(importer, vma)) {
-			importer->vm_flags &= ~VM_LOCK_RMAP;
 			return -ENOMEM;
 		}
 		importer->anon_vma = anon_vma;
@@ -618,11 +616,6 @@ again:	remove_next = 1 + (end > next->vm_end);
 		__vma_unlink(mm, next, vma);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
-		/*
-		 * This VMA is now dead, no need for rmap to follow it.
-		 * Call anon_vma_merge below, outside of i_mmap_lock.
-		 */
-		next->vm_flags |= VM_LOCK_RMAP;
 	} else if (insert) {
 		/*
 		 * split_vma has split insert from vma, and needs
@@ -635,20 +628,12 @@ again:	remove_next = 1 + (end > next->vm_end);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
-	/*
-	 * The current VMA has been set up. It is now safe for the
-	 * rmap code to get from the pages to the ptes.
-	 */
-	if (anon_vma && importer)
-		importer->vm_flags &= ~VM_LOCK_RMAP;
 	if (remove_next) {
 		if (file) {
 			fput(file);
 			if (next->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(mm);
 		}
-		/* Protected by mmap_sem and VM_LOCK_RMAP. */
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
...
mm/rmap.c
@@ -329,18 +329,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 		/* page should be within @vma mapping range */
 		return -EFAULT;
 	}
-	if (unlikely(vma->vm_flags & VM_LOCK_RMAP)) {
-		/*
-		 * This VMA is being unlinked or is not yet linked into the
-		 * VMA tree. Do not try to follow this rmap. This race
-		 * condition can result in page_referenced() ignoring a
-		 * reference or in try_to_unmap() failing to unmap a page.
-		 * The VMA cannot be freed under us because we hold the
-		 * anon_vma->lock, which the munmap code takes while
-		 * unlinking the anon_vmas from the VMA.
-		 */
-		return -EFAULT;
-	}
 	return address;
 }
...
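
With the VM_LOCK_RMAP check gone, vma_address() reduces to the offset computation plus the range check kept as context in the hunk above. A sketch of the resulting function follows; the pgoff computation line is an assumption reconstructed from kernel code of this era rather than something shown in the diff.

	/* Sketch of vma_address() after this patch.  Only the range check
	 * and return paths are visible in the hunk; the address
	 * computation is assumed from surrounding 2.6.34-era mm/rmap.c. */
	static inline unsigned long
	vma_address(struct page *page, struct vm_area_struct *vma)
	{
	        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	        unsigned long address;

	        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
	                /* page should be within @vma mapping range */
	                return -EFAULT;
	        }
	        return address;
	}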