Commit 7c8de358 authored by Ethon Paul, committed by Linus Torvalds

mm/hugetlb: fix typos in comments

[akpm@linux-foundation.org: coding style fixes]
Signed-off-by: Ethon Paul <ethp@qq.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Link: http://lkml.kernel.org/r/20200410163714.14085-1-ethp@qq.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b4f315b4
@@ -85,7 +85,7 @@ static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
 	spin_unlock(&spool->lock);
 	/* If no pages are used, and no other handles to the subpool
-	 * remain, give up any reservations mased on minimum size and
+	 * remain, give up any reservations based on minimum size and
 	 * free the subpool */
 	if (free) {
 		if (spool->min_hpages != -1)
@@ -133,7 +133,7 @@ void hugepage_put_subpool(struct hugepage_subpool *spool)
  * the request. Otherwise, return the number of pages by which the
  * global pools must be adjusted (upward). The returned value may
  * only be different than the passed value (delta) in the case where
- * a subpool minimum size must be manitained.
+ * a subpool minimum size must be maintained.
  */
 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
 				      long delta)
@@ -473,7 +473,7 @@ static int allocate_file_region_entries(struct resv_map *resv,
  *
  * Return the number of new huge pages added to the map. This number is greater
  * than or equal to zero. If file_region entries needed to be allocated for
- * this operation and we were not able to allocate, it ruturns -ENOMEM.
+ * this operation and we were not able to allocate, it returns -ENOMEM.
  * region_add of regions of length 1 never allocate file_regions and cannot
  * fail; region_chg will always allocate at least 1 entry and a region_add for
  * 1 page will only require at most 1 entry.
@@ -988,7 +988,7 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
 	 * We know VM_NORESERVE is not set. Therefore, there SHOULD
 	 * be a region map for all pages. The only situation where
 	 * there is no region map is if a hole was punched via
-	 * fallocate. In this case, there really are no reverves to
+	 * fallocate. In this case, there really are no reserves to
 	 * use. This situation is indicated if chg != 0.
 	 */
 	if (chg)
@@ -1519,7 +1519,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
 	 * For gigantic hugepages allocated through bootmem at
 	 * boot, it's safer to be consistent with the not-gigantic
 	 * hugepages and clear the PG_reserved bit from all tail pages
-	 * too. Otherwse drivers using get_user_pages() to access tail
+	 * too. Otherwise drivers using get_user_pages() to access tail
 	 * pages may get the reference counting wrong if they see
 	 * PG_reserved set on a tail page (despite the head page not
 	 * having PG_reserved set). Enforcing this consistency between
@@ -4579,9 +4579,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	/*
 	 * entry could be a migration/hwpoison entry at this point, so this
 	 * check prevents the kernel from going below assuming that we have
-	 * a active hugepage in pagecache. This goto expects the 2nd page fault,
-	 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
-	 * handle it.
+	 * an active hugepage in pagecache. This goto expects the 2nd page
+	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
+	 * properly handle it.
 	 */
 	if (!pte_present(entry))
 		goto out_mutex;
...