Commit 33b8f84a authored by Mike Kravetz, committed by Linus Torvalds

mm/hugetlb: change hugetlb_reserve_pages() to type bool

While reviewing a bug in hugetlb_reserve_pages, it was noticed that all
callers ignore the return value.  Any failure is considered an ENOMEM
error by the callers.

Change the function's return type to bool: it returns true if the
reservation was successful, false otherwise.  Callers currently treat a
zero return code as success; change them to test for true instead.  No
functional change, only code cleanup.
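
As a caller-side illustration of the new convention, here is a hypothetical
wrapper (not code from this patch; the name reserve_or_enomem is invented):

	/* Hypothetical helper, for illustration only -- not in the patch. */
	static int reserve_or_enomem(struct inode *inode, long from, long to,
				     struct vm_area_struct *vma, vm_flags_t flags)
	{
		/* Old convention: 0 meant success, and callers collapsed any
		 * nonzero error code to ENOMEM anyway.  New convention:
		 * true means success, so failure is tested with '!'. */
		if (!hugetlb_reserve_pages(inode, from, to, vma, flags))
			return -ENOMEM;
		return 0;
	}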

Link: https://lkml.kernel.org/r/20201221192542.15732-1-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f8159c13
fs/hugetlbfs/inode.c
@@ -171,7 +171,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	file_accessed(file);
 
 	ret = -ENOMEM;
-	if (hugetlb_reserve_pages(inode,
+	if (!hugetlb_reserve_pages(inode,
 				vma->vm_pgoff >> huge_page_order(h),
 				len >> huge_page_shift(h), vma,
 				vma->vm_flags))
@@ -1493,7 +1493,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
 	inode->i_size = size;
 	clear_nlink(inode);
 
-	if (hugetlb_reserve_pages(inode, 0,
+	if (!hugetlb_reserve_pages(inode, 0,
 			size >> huge_page_shift(hstate_inode(inode)), NULL,
 			acctflag))
 		file = ERR_PTR(-ENOMEM);
include/linux/hugetlb.h
@@ -139,7 +139,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
 				unsigned long dst_addr,
 				unsigned long src_addr,
 				struct page **pagep);
-int hugetlb_reserve_pages(struct inode *inode, long from, long to,
+bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						struct vm_area_struct *vma,
 						vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
mm/hugetlb.c
@@ -5016,12 +5016,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	return pages << h->order;
 }
 
-int hugetlb_reserve_pages(struct inode *inode,
+/* Return true if reservation was successful, false otherwise.  */
+bool hugetlb_reserve_pages(struct inode *inode,
 					long from, long to,
 					struct vm_area_struct *vma,
 					vm_flags_t vm_flags)
 {
-	long ret, chg, add = -1;
+	long chg, add = -1;
 	struct hstate *h = hstate_inode(inode);
 	struct hugepage_subpool *spool = subpool_inode(inode);
 	struct resv_map *resv_map;
@@ -5031,7 +5032,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	/* This should never happen */
 	if (from > to) {
 		VM_WARN(1, "%s called with a negative range\n", __func__);
-		return -EINVAL;
+		return false;
 	}
 
 	/*
@@ -5040,7 +5041,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * without using reserves
 	 */
 	if (vm_flags & VM_NORESERVE)
-		return 0;
+		return true;
 
 	/*
 	 * Shared mappings base their reservation on the number of pages that
@@ -5062,7 +5063,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 		/* Private mapping. */
 		resv_map = resv_map_alloc();
 		if (!resv_map)
-			return -ENOMEM;
+			return false;
 
 		chg = to - from;
@@ -5070,18 +5071,12 @@ int hugetlb_reserve_pages(struct inode *inode,
 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0) {
-		ret = chg;
+	if (chg < 0)
 		goto out_err;
-	}
-
-	ret = hugetlb_cgroup_charge_cgroup_rsvd(
-		hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
 
-	if (ret < 0) {
-		ret = -ENOMEM;
+	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+				chg * pages_per_huge_page(h), &h_cg) < 0)
 		goto out_err;
-	}
 
 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
@@ -5096,19 +5091,15 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * reservations already in place (gbl_reserve).
 	 */
 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
-	if (gbl_reserve < 0) {
-		ret = -ENOSPC;
+	if (gbl_reserve < 0)
 		goto out_uncharge_cgroup;
-	}
 
 	/*
 	 * Check enough hugepages are available for the reservation.
 	 * Hand the pages back to the subpool if there are not
 	 */
-	ret = hugetlb_acct_memory(h, gbl_reserve);
-	if (ret < 0) {
+	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
 		goto out_put_pages;
-	}
 
 	/*
 	 * Account for the reservations made. Shared mappings record regions
@@ -5126,7 +5117,6 @@ int hugetlb_reserve_pages(struct inode *inode,
 
 		if (unlikely(add < 0)) {
 			hugetlb_acct_memory(h, -gbl_reserve);
-			ret = add;
 			goto out_put_pages;
 		} else if (unlikely(chg > add)) {
 			/*
@@ -5147,7 +5137,8 @@ int hugetlb_reserve_pages(struct inode *inode,
 			hugetlb_acct_memory(h, -rsv_adjust);
 		}
 	}
-	return 0;
+	return true;
+
 out_put_pages:
 	/* put back original number of pages, chg */
 	(void)hugepage_subpool_put_pages(spool, chg);
@@ -5163,7 +5154,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 		region_abort(resv_map, from, to, regions_needed);
 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		kref_put(&resv_map->refs, resv_map_release);
-	return ret;
+	return false;
 }
 
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
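
With the local "ret" gone, every failure path in hugetlb_reserve_pages() now
just jumps to its cleanup label, and the function has a single success exit.
A self-contained toy sketch of that goto-cleanup idiom (generic userspace C,
not kernel code; step_a and step_b are invented names):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins for the reservation steps; names are invented. */
	static bool step_a(void) { return true; }
	static bool step_b(void) { return false; /* simulate a failure */ }

	static bool reserve(void)
	{
		if (!step_a())
			goto out_err;		/* nothing to undo yet */
		if (!step_b())
			goto out_undo_a;	/* undo step_a, then fail */
		return true;			/* single success exit */

	out_undo_a:
		puts("undoing step_a");		/* mirrors out_put_pages etc. */
	out_err:
		return false;			/* all failures collapse to false */
	}

	int main(void)
	{
		printf("reserve() = %s\n", reserve() ? "true" : "false");
		return 0;
	}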