Commit c50ac050 authored by Dave Hansen, committed by Linus Torvalds

hugetlb: fix resv_map leak in error path

When called for anonymous (non-shared) mappings, hugetlb_reserve_pages()
does a resv_map_alloc().  It depends on code in hugetlbfs's
vm_ops->close() to release that allocation.

However, in the mmap() failure path, we do a plain unmap_region() without
the remove_vma() which actually calls vm_ops->close().

This is a decent fix.  This leak could get reintroduced if new code (say,
after hugetlb_reserve_pages() in hugetlbfs_file_mmap()) decides to return
an error.  But, I think it would have to unroll the reservation anyway.

Christoph's test case:

	http://marc.info/?l=linux-mm&m=133728900729735

This patch applies to 3.4 and later.  A version for earlier kernels is at
https://lkml.org/lkml/2012/5/22/418.
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reported-by: Christoph Lameter <cl@linux.com>
Tested-by: Christoph Lameter <cl@linux.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>	[2.6.32+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5c2b8a16
...@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) ...@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
kref_get(&reservations->refs); kref_get(&reservations->refs);
} }
/*
 * Drop one reference on @vma's private reservation map, if it has one.
 * When the last reference goes away, kref_put() invokes
 * resv_map_release() to free the map.  This is the counterpart to the
 * kref_get() done in hugetlb_vm_op_open(), and is shared by the normal
 * close path and the hugetlb_reserve_pages() error path so neither can
 * leak the resv_map.
 */
static void resv_map_put(struct vm_area_struct *vma)
{
struct resv_map *reservations = vma_resv_map(vma);
/* Shared (VM_MAYSHARE) mappings have no per-vma resv_map. */
if (!reservations)
return;
kref_put(&reservations->refs, resv_map_release);
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma) static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{ {
struct hstate *h = hstate_vma(vma); struct hstate *h = hstate_vma(vma);
...@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) ...@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
reserve = (end - start) - reserve = (end - start) -
region_count(&reservations->regions, start, end); region_count(&reservations->regions, start, end);
kref_put(&reservations->refs, resv_map_release); resv_map_put(vma);
if (reserve) { if (reserve) {
hugetlb_acct_memory(h, -reserve); hugetlb_acct_memory(h, -reserve);
...@@ -2991,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -2991,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode,
set_vma_resv_flags(vma, HPAGE_RESV_OWNER); set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
} }
if (chg < 0) if (chg < 0) {
return chg; ret = chg;
goto out_err;
}
/* There must be enough pages in the subpool for the mapping */ /* There must be enough pages in the subpool for the mapping */
if (hugepage_subpool_get_pages(spool, chg)) if (hugepage_subpool_get_pages(spool, chg)) {
return -ENOSPC; ret = -ENOSPC;
goto out_err;
}
/* /*
* Check enough hugepages are available for the reservation. * Check enough hugepages are available for the reservation.
...@@ -3005,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -3005,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode,
ret = hugetlb_acct_memory(h, chg); ret = hugetlb_acct_memory(h, chg);
if (ret < 0) { if (ret < 0) {
hugepage_subpool_put_pages(spool, chg); hugepage_subpool_put_pages(spool, chg);
return ret; goto out_err;
} }
/* /*
...@@ -3022,6 +3035,9 @@ int hugetlb_reserve_pages(struct inode *inode, ...@@ -3022,6 +3035,9 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE) if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to); region_add(&inode->i_mapping->private_list, from, to);
return 0; return 0;
out_err:
resv_map_put(vma);
return ret;
} }
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment