Commit a3efc1fa authored by Andrew Morton, committed by Linus Torvalds

[PATCH] follow_hugetlb_page fix

From: William Lee Irwin III <wli@holomorphy.com>

follow_hugetlb_page() drops out of the loop prematurely and fails to take the
appropriate refcounts if its starting address is not hugepage-aligned.

It looked a bit unclean too, so I rewrote it.  This fixes a bug, and more
importantly, makes the thing readable by something other than a compiler
(e.g.  programmers).
parent cda55f33
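Before the hunk itself, a minimal user-space C sketch (not part of the patch; the 4 KiB/4 MiB page sizes and the example addresses are illustrative assumptions) of the per-base-page indexing the rewritten loop relies on: each virtual page frame number selects a subpage of the compound huge page via vpfn % (HPAGE_SIZE/PAGE_SIZE), which works even when the starting address is not hugepage-aligned.

    #include <stdio.h>

    #define PAGE_SIZE   4096UL                 /* illustrative base page size */
    #define HPAGE_SIZE  (4UL * 1024 * 1024)    /* illustrative huge page size */

    int main(void)
    {
            /* Deliberately non-hugepage-aligned starting address (hypothetical). */
            unsigned long vaddr = 0x40000000UL + 5 * PAGE_SIZE;
            unsigned long end   = vaddr + 3 * PAGE_SIZE;
            unsigned long vpfn  = vaddr / PAGE_SIZE;

            /* One iteration per base page, mirroring the new while loop:
             * vpfn % (HPAGE_SIZE/PAGE_SIZE) picks the subpage within the
             * huge page regardless of the alignment of the first address. */
            while (vaddr < end) {
                    printf("vaddr %#lx -> subpage %lu of its huge page\n",
                           vaddr, vpfn % (HPAGE_SIZE / PAGE_SIZE));
                    vaddr += PAGE_SIZE;
                    ++vpfn;
            }
            return 0;
    }

The kernel loop in the hunk below additionally takes a reference on each selected subpage with get_page() and stores it in pages[i].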
@@ -129,37 +129,45 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 int
 follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                     struct page **pages, struct vm_area_struct **vmas,
-                    unsigned long *st, int *length, int i)
+                    unsigned long *position, int *length, int i)
 {
-        pte_t *ptep, pte;
-        unsigned long start = *st;
-        unsigned long pstart;
-        int len = *length;
-        struct page *page;
+        unsigned long vpfn, vaddr = *position;
+        int remainder = *length;
 
         WARN_ON(!is_vm_hugetlb_page(vma));
 
-        do {
-                pstart = start;
-                ptep = huge_pte_offset(mm, start);
-                pte = *ptep;
-
-back1:
-                page = pte_page(pte);
+        vpfn = vaddr/PAGE_SIZE;
+        while (vaddr < vma->vm_end && remainder) {
                 if (pages) {
-                        page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
+                        pte_t *pte;
+                        struct page *page;
+
+                        pte = huge_pte_offset(mm, vaddr);
+
+                        /* hugetlb should be locked, and hence, prefaulted */
+                        WARN_ON(!pte || pte_none(*pte));
+
+                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
+
+                        WARN_ON(!PageCompound(page));
+
                         get_page(page);
                         pages[i] = page;
                 }
                 if (vmas)
                         vmas[i] = vma;
-                i++;
-                len--;
-                start += PAGE_SIZE;
-                if (((start & HPAGE_MASK) == pstart) && len &&
-                    (start < vma->vm_end))
-                        goto back1;
-        } while (len && start < vma->vm_end);
-        *length = len;
-        *st = start;
+                vaddr += PAGE_SIZE;
+                ++vpfn;
+                --remainder;
+                ++i;
+        }
+        *length = remainder;
+        *position = vaddr;
         return i;
 }