Commit e0b2508d authored by Hugh Dickins, committed by Ben Hutchings

mm: fix crashes from mbind() merging vmas

commit d05f0cdc upstream.

In v2.6.34 commit 9d8cebd4 ("mm: fix mbind vma merge problem")
introduced vma merging to mbind(), but it should have also changed the
convention of passing start vma from queue_pages_range() (formerly
check_range()) to new_vma_page(): vma merging may have already freed
that structure, resulting in BUG at mm/mempolicy.c:1738 and probably
worse crashes.

Fixes: 9d8cebd4 ("mm: fix mbind vma merge problem")
Reported-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 3.2:
 - Adjust context
 - Keep the same arguments to migrate_pages() except for private=start]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 386292b4
@@ -566,24 +566,23 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
  * If pagelist != NULL then isolate pages from the LRU and
  * put them on the pagelist.
  */
-static struct vm_area_struct *
+static int
 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
+	int err = 0;
+	struct vm_area_struct *vma, *prev;
 
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
 	prev = NULL;
-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 			if (!vma->vm_next && vma->vm_end < end)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 			if (prev && prev->vm_end < vma->vm_start)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 		}
 		if (!is_vm_hugetlb_page(vma) &&
 			((flags & MPOL_MF_STRICT) ||
@@ -597,14 +596,12 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 			start = vma->vm_start;
 			err = check_pgd_range(vma, start, endvma, nodes,
 						flags, private);
-			if (err) {
-				first = ERR_PTR(err);
+			if (err)
 				break;
-			}
 		}
 		prev = vma;
 	}
-	return first;
+	return err;
 }
 
 /*
@@ -1060,16 +1057,17 @@ int do_migrate_pages(struct mm_struct *mm,
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1095,7 +1093,7 @@ int do_migrate_pages(struct mm_struct *mm,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1105,7 +1103,6 @@ static long do_mbind(unsigned long start, unsigned long len,
 		     unsigned short mode, unsigned short mode_flags,
 		     nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1169,19 +1166,16 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = check_range(mm, start, end, nmask,
+	err = check_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-
-	err = PTR_ERR(vma);
-	if (!IS_ERR(vma)) {
+	if (!err) {
 		int nr_failed = 0;
 
 		err = mbind_range(mm, start, end, new);
 
 		if (!list_empty(&pagelist)) {
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-						(unsigned long)vma,
-						false, true);
+			nr_failed = migrate_pages(&pagelist, new_page,
+						start, false, true);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment