Commit 9008d35b authored by Andrew Morton, committed by Linus Torvalds

[PATCH] hugepage: fix add_to_page_cache() error handling

From: David Gibson <david@gibson.dropbear.id.au>

add_to_page_cache() locks the given page if and only if it succeeds.  The
hugepage code (every arch), however, does an unlock_page() after
add_to_page_cache() before checking the return code, which could trip the
BUG() in unlock_page() if add_to_page_cache() failed.
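
For illustration, here is the pattern in question reduced to a minimal
before/after sketch (simplified from the hunks below; the surrounding
allocation logic is elided):

	/* Before: unlock_page() runs unconditionally, but
	 * add_to_page_cache() only locks the page when it succeeds,
	 * so a failure here trips the BUG() in unlock_page(). */
	ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
	unlock_page(page);
	if (ret) {
		hugetlb_put_quota(mapping);
		free_huge_page(page);
		goto out;
	}

	/* After: unlock only on success; on failure, release the
	 * quota and the page instead. */
	ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
	if (! ret) {
		unlock_page(page);
	} else {
		hugetlb_put_quota(mapping);
		free_huge_page(page);
		goto out;
	}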

In practice we've never hit this bug, because the only ways
add_to_page_cache() can fail are when we fail to allocate a radix tree node
(very rare), or when there is already a page at that offset in the radix
tree, which never happens during prefault, obviously.  We should probably
fix it anyway, though.

The analogous bug in some of the patches floating around for
demand-allocation of hugepages is more of a problem, because multiple
processes can race to instantiate a particular page in the radix tree -
that's been hit at least once (which is how I found this).
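
For concreteness, a hypothetical sketch of how that race trips the BUG()
(this is not code from any of those patches; alloc_huge_page() stands in
for whatever allocation path they use):

	/* Two tasks fault the same hugepage offset concurrently and
	 * both reach this point with their own freshly allocated page. */
	page = alloc_huge_page();
	ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
	/* The winner's page is inserted and locked; the loser gets
	 * -EEXIST and its page was never locked. */
	unlock_page(page);	/* the loser trips the BUG() here */
	if (ret) {
		free_huge_page(page);
		goto retry;	/* e.g. look up the winner's page */
	}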
parent d7553443
@@ -264,8 +264,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
 		ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-		unlock_page(page);
-		if (ret) {
+		if (! ret) {
+			unlock_page(page);
+		} else {
 			hugetlb_put_quota(mapping);
 			free_huge_page(page);
 			goto out;
...
@@ -293,8 +293,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
 		ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-		unlock_page(page);
-		if (ret) {
+		if (! ret) {
+			unlock_page(page);
+		} else {
 			hugetlb_put_quota(mapping);
 			free_huge_page(page);
 			goto out;
...
@@ -452,8 +452,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
 		ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-		unlock_page(page);
-		if (ret) {
+		if (! ret) {
+			unlock_page(page);
+		} else {
 			hugetlb_put_quota(mapping);
 			free_huge_page(page);
 			goto out;
...
@@ -248,8 +248,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
 		ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-		unlock_page(page);
-		if (ret) {
+		if (! ret) {
+			unlock_page(page);
+		} else {
 			hugetlb_put_quota(mapping);
 			free_huge_page(page);
 			goto out;
...
@@ -245,8 +245,9 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
 		ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-		unlock_page(page);
-		if (ret) {
+		if (! ret) {
+			unlock_page(page);
+		} else {
 			hugetlb_put_quota(mapping);
 			free_huge_page(page);
 			goto out;
...