Commit af0ed73e authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Linus Torvalds

mm, hugetlb: decrement reserve count if VM_NORESERVE alloc page cache

If a vma with VM_NORESERVE allocates a new page for the page cache, we
should check whether this area is already reserved.  If the address is
already reserved by another process (the chg == 0 case), we should
decrement the reserve count, because the allocated page goes into the
page cache and, when the inode is released, there is currently no way
to tell whether that page came from the reserved pool.  Without the
decrement, the reserve count is over-counted.  The following example
code easily reproduces this situation.

Assume a 2MB huge page size and nr_hugepages = 100.

        /* fd: a file descriptor for a file on a hugetlbfs mount; MB: 1024 * 1024 */
        size = 20 * MB;
        flag = MAP_SHARED;
        p = mmap(NULL, size, PROT_READ|PROT_WRITE, flag, fd, 0);
        if (p == MAP_FAILED) {
                fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
                return -1;
        }

        flag = MAP_SHARED | MAP_NORESERVE;
        q = mmap(NULL, size, PROT_READ|PROT_WRITE, flag, fd, 0);
        if (q == MAP_FAILED) {
                fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
                return -1;
        }
        q[0] = 'c';

After the program finishes, run 'cat /proc/meminfo'.  You will see the
result below.

HugePages_Free:      100
HugePages_Rsvd:        1
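
For reference, the fragment above can be expanded into a self-contained
program.  This is a minimal sketch, assuming 2MB huge pages and a hugetlbfs
mount at /dev/hugepages; the file name "hugetlb-repro" and the explicit
munmap/unlink cleanup are additions for this example, not part of the
original report.

/*
 * Self-contained sketch of the reproducer above (not part of the original
 * patch).  Assumptions: 2MB huge pages, a large enough hugetlb pool, and a
 * hugetlbfs mount at /dev/hugepages; the file name is made up.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define MB (1024UL * 1024UL)

int main(void)
{
        size_t size = 20 * MB;
        char *p, *q;
        int fd;

        fd = open("/dev/hugepages/hugetlb-repro", O_CREAT | O_RDWR, 0600);
        if (fd < 0) {
                fprintf(stderr, "open() failed: %s\n", strerror(errno));
                return -1;
        }

        /* Reserving mapping: charges 10 huge pages to the reserve pool. */
        p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
                return -1;
        }

        /* Non-reserving mapping of the same, already reserved region. */
        q = mmap(NULL, size, PROT_READ|PROT_WRITE,
                 MAP_SHARED | MAP_NORESERVE, fd, 0);
        if (q == MAP_FAILED) {
                fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
                return -1;
        }

        /* Fault one page through the VM_NORESERVE mapping. */
        q[0] = 'c';

        munmap(q, size);
        munmap(p, size);
        close(fd);
        unlink("/dev/hugepages/hugetlb-repro");
        return 0;
}

Run it after creating the hugetlb pool (for example by writing 100 to
/proc/sys/vm/nr_hugepages) and mounting hugetlbfs; on a kernel without this
fix, HugePages_Rsvd is left at 1 after the program exits.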

To fix this, check the mapping type and the tracked region.  If the
mapping has VM_NORESERVE and VM_MAYSHARE set and chg is 0, the newly
allocated page will go into page cache for a region that was already
reserved when the mapping was created.  In that case the reserve count
should be decreased.  This patch implements that check and solves the
problem; a condensed user-space illustration of the new check follows
below.
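
The condensed illustration mentioned above is a user-space sketch only, not
the kernel source: the flag bit values are made-up stand-ins for the
kernel's vm_flags, and the private-mapping ownership checks of the real
vma_has_reserves() are omitted.

/*
 * Illustration only: the decision the patched vma_has_reserves() makes for
 * shared and VM_NORESERVE mappings.  Flag values are stand-ins, and the
 * private-mapping (HPAGE_RESV_OWNER) handling of the real function is
 * left out.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_MAYSHARE  0x1UL      /* stand-in value */
#define VM_NORESERVE 0x2UL      /* stand-in value */

static bool vma_has_reserves(unsigned long vm_flags, long chg)
{
        if (vm_flags & VM_NORESERVE) {
                /*
                 * chg == 0 means the region was already reserved when the
                 * mapping was created, so a page faulted through a shared
                 * VM_NORESERVE mapping must still consume that reserve.
                 */
                return (vm_flags & VM_MAYSHARE) && chg == 0;
        }

        /* Shared mappings always use reserves. */
        return vm_flags & VM_MAYSHARE;
}

int main(void)
{
        /* The problematic case from the reproducer: shared + NORESERVE, chg == 0. */
        printf("%d\n", vma_has_reserves(VM_MAYSHARE | VM_NORESERVE, 0));  /* 1 */
        printf("%d\n", vma_has_reserves(VM_MAYSHARE | VM_NORESERVE, 1));  /* 0 */
        return 0;
}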

[akpm@linux-foundation.org: fix spelling in comment]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a63884e9
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -443,10 +443,23 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma, long chg)
 {
-        if (vma->vm_flags & VM_NORESERVE)
-                return 0;
+        if (vma->vm_flags & VM_NORESERVE) {
+                /*
+                 * This address is already reserved by other process(chg == 0),
+                 * so, we should decrement reserved count. Without decrementing,
+                 * reserve count remains after releasing inode, because this
+                 * allocated page will go into page cache and is regarded as
+                 * coming from reserved pool in releasing step.  Currently, we
+                 * don't have any other solution to deal with this situation
+                 * properly, so add work-around here.
+                 */
+                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
+                        return 1;
+                else
+                        return 0;
+        }
 
         /* Shared mappings always use reserves */
         if (vma->vm_flags & VM_MAYSHARE)
@@ -520,7 +533,8 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 
 static struct page *dequeue_huge_page_vma(struct hstate *h,
                                 struct vm_area_struct *vma,
-                                unsigned long address, int avoid_reserve)
+                                unsigned long address, int avoid_reserve,
+                                long chg)
 {
         struct page *page = NULL;
         struct mempolicy *mpol;
@@ -535,7 +549,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
          * have no page reserves. This check ensures that reservations are
          * not "stolen". The child may still get SIGKILLed
          */
-        if (!vma_has_reserves(vma) &&
+        if (!vma_has_reserves(vma, chg) &&
                         h->free_huge_pages - h->resv_huge_pages == 0)
                 goto err;
 
@@ -553,8 +567,12 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
                         if (page) {
-                                if (!avoid_reserve && vma_has_reserves(vma))
-                                        h->resv_huge_pages--;
+                                if (avoid_reserve)
+                                        break;
+                                if (!vma_has_reserves(vma, chg))
+                                        break;
+
+                                h->resv_huge_pages--;
                                 break;
                         }
                 }
@@ -1155,7 +1173,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                 return ERR_PTR(-ENOSPC);
         }
         spin_lock(&hugetlb_lock);
-        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
+        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
         if (!page) {
                 spin_unlock(&hugetlb_lock);
                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);