Commit 2039e7b5 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] mremap: vma_relink_file race fix

From: Hugh Dickins <hugh@veritas.com>

Subtle point from Rajesh Venkatasubramanian: when mremap's move_vma fails and
so rewinds, before moving the file-based ptes back, we must move new_vma
before old_vma in the i_mmap or i_mmap_shared list, so that when racing
against vmtruncate we cannot propagate pages to be truncated back from
new_vma into the just cleaned old_vma.
parent e2ea8374
...@@ -543,6 +543,7 @@ extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, ...@@ -543,6 +543,7 @@ extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
struct rb_node **, struct rb_node *); struct rb_node **, struct rb_node *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct *, extern struct vm_area_struct *copy_vma(struct vm_area_struct *,
unsigned long addr, unsigned long len, unsigned long pgoff); unsigned long addr, unsigned long len, unsigned long pgoff);
extern void vma_relink_file(struct vm_area_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *); extern void exit_mmap(struct mm_struct *);
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
......
...@@ -1525,3 +1525,24 @@ struct vm_area_struct *copy_vma(struct vm_area_struct *vma, ...@@ -1525,3 +1525,24 @@ struct vm_area_struct *copy_vma(struct vm_area_struct *vma,
} }
return new_vma; return new_vma;
} }
/*
 * Reposition vma immediately after prev on the shared file list
 * (i_mmap / i_mmap_shared): used by mremap's move_vma error path
 * so that, racing against vmtruncate, pages cannot be propagated
 * back from the new vma into the just-cleaned old vma.
 */
void vma_relink_file(struct vm_area_struct *vma, struct vm_area_struct *prev)
{
	struct mm_struct *mm = vma->vm_mm;
	struct address_space *mapping;

	/* Anonymous vmas are not on any shared file list. */
	if (!vma->vm_file)
		return;
	mapping = vma->vm_file->f_mapping;
	if (!mapping)
		return;

	/* Same lock order as vma_link: i_shared_sem, then page_table_lock. */
	down(&mapping->i_shared_sem);
	spin_lock(&mm->page_table_lock);
	list_move(&vma->shared, &prev->shared);
	spin_unlock(&mm->page_table_lock);
	up(&mapping->i_shared_sem);
}
...@@ -187,7 +187,14 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -187,7 +187,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
* On error, move entries back from new area to old, * On error, move entries back from new area to old,
* which will succeed since page tables still there, * which will succeed since page tables still there,
* and then proceed to unmap new area instead of old. * and then proceed to unmap new area instead of old.
*
* Subtle point from Rajesh Venkatasubramanian: before
* moving file-based ptes, move new_vma before old vma
* in the i_mmap or i_mmap_shared list, so when racing
* against vmtruncate we cannot propagate pages to be
* truncated back from new_vma into just cleaned old.
*/ */
vma_relink_file(vma, new_vma);
move_page_tables(new_vma, old_addr, new_addr, moved_len); move_page_tables(new_vma, old_addr, new_addr, moved_len);
vma = new_vma; vma = new_vma;
old_len = new_len; old_len = new_len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment