Commit 5502ea44, authored by Peter Xu, committed by Andrew Morton

mm/hugetlb: add page_mask for hugetlb_follow_page_mask()

follow_page() doesn't need it, but we'll start to need it when unifying
gup for hugetlb.

Link: https://lkml.kernel.org/r/20230628215310.73782-4-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A . Shutemov <kirill@shutemov.name>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 458568c9
@@ -131,7 +131,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
 			    struct vm_area_struct *, struct vm_area_struct *);
 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-				      unsigned long address, unsigned int flags);
+				      unsigned long address, unsigned int flags,
+				      unsigned int *page_mask);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			 struct page **, unsigned long *, unsigned long *,
			 long, unsigned int, int *);
@@ -297,8 +298,9 @@ static inline void adjust_range_if_pmd_sharing_possible(
 {
 }

-static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-				unsigned long address, unsigned int flags)
+static inline struct page *hugetlb_follow_page_mask(
+	struct vm_area_struct *vma, unsigned long address, unsigned int flags,
+	unsigned int *page_mask)
 {
 	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/
 }
......
@@ -824,7 +824,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
	 * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
	 */
	if (is_vm_hugetlb_page(vma))
-		return hugetlb_follow_page_mask(vma, address, flags);
+		return hugetlb_follow_page_mask(vma, address, flags,
+						&ctx->page_mask);

	pgd = pgd_offset(mm, address);
......
@@ -6454,7 +6454,8 @@ static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
 }

 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-				      unsigned long address, unsigned int flags)
+				      unsigned long address, unsigned int flags,
+				      unsigned int *page_mask)
 {
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
@@ -6504,6 +6505,8 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
			page = ERR_PTR(ret);
			goto out;
		}
+
+		*page_mask = (1U << huge_page_order(h)) - 1;
	}
 out:
	spin_unlock(ptl);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment