Commit 080dbb61 authored by Aneesh Kumar K.V, committed by Linus Torvalds

mm/follow_page_mask: split follow_page_mask to smaller functions.

This makes the code easier to read.  There are no functional changes in
this patch.  A follow-up patch will update follow_page_mask() to handle
the hugetlb hugepd format so that architectures like ppc64 can switch to
the generic version; this split makes that change straightforward.

Link: http://lkml.kernel.org/r/1494926612-23928-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 383321ab
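Before the diff, a quick illustration of the structure this patch creates: follow_page_mask() now validates only the PGD level and then delegates, level by level, to follow_p4d_mask(), follow_pud_mask() and follow_pmd_mask(), each of which checks its own entry before descending. The standalone C sketch below models that layered dispatch with simplified stand-in types and hypothetical names (walk, walk_p4d, etc.); it is an illustration of the shape of the refactor, not the kernel code, which also handles the huge-page, devmap and THP cases at each level, as the diff shows.

/*
 * Standalone sketch of the layered walk: each page-table level gets
 * its own helper that validates its entry and delegates downward.
 * All types and helpers here are simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct page { unsigned long pfn; };

typedef struct { int present; } pgd_t;                   /* top level */
typedef struct { int present; } p4d_t;
typedef struct { int present; } pud_t;
typedef struct { int present; struct page *pg; } pmd_t;  /* leaf here */

static struct page *walk_pmd(pmd_t *pmd)
{
	/* Leaf level: either a mapped page or nothing. */
	return pmd->present ? pmd->pg : NULL;
}

static struct page *walk_pud(pud_t *pud, pmd_t *pmd)
{
	/* Validate this level, then delegate one level down. */
	return pud->present ? walk_pmd(pmd) : NULL;
}

static struct page *walk_p4d(p4d_t *p4d, pud_t *pud, pmd_t *pmd)
{
	return p4d->present ? walk_pud(pud, pmd) : NULL;
}

static struct page *walk(pgd_t *pgd, p4d_t *p4d, pud_t *pud, pmd_t *pmd)
{
	/* Mirrors follow_page_mask(): check the PGD, then hand off. */
	return pgd->present ? walk_p4d(p4d, pud, pmd) : NULL;
}

int main(void)
{
	struct page pg = { .pfn = 42 };
	pgd_t pgd = { 1 };
	p4d_t p4d = { 1 };
	pud_t pud = { 1 };
	pmd_t pmd = { 1, &pg };
	struct page *res = walk(&pgd, &p4d, &pud, &pmd);

	printf("found pfn %lu\n", res ? res->pfn : 0UL);
	return 0;
}

In the actual patch each per-level helper also short-circuits for hugetlb and devmap mappings before delegating, which is exactly why splitting the walk per level makes the planned hugepd handling easier to slot in.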
mm/gup.c
@@ -208,68 +208,16 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	return no_page_table(vma, flags);
 }
 
-/**
- * follow_page_mask - look up a page descriptor from a user-virtual address
- * @vma: vm_area_struct mapping @address
- * @address: virtual address to look up
- * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
- *
- * @flags can have FOLL_ flags set, defined in <linux/mm.h>
- *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
- * an error pointer if there is a mapping to something not represented
- * by a page descriptor (see also vm_normal_page()).
- */
-struct page *follow_page_mask(struct vm_area_struct *vma,
-			      unsigned long address, unsigned int flags,
-			      unsigned int *page_mask)
+static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+				    unsigned long address, pud_t *pudp,
+				    unsigned int flags, unsigned int *page_mask)
 {
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
 	pmd_t *pmd;
 	spinlock_t *ptl;
 	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 
-	*page_mask = 0;
-
-	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
-	if (!IS_ERR(page)) {
-		BUG_ON(flags & FOLL_GET);
-		return page;
-	}
-
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return no_page_table(vma, flags);
-
-	p4d = p4d_offset(pgd, address);
-	if (p4d_none(*p4d))
-		return no_page_table(vma, flags);
-	BUILD_BUG_ON(p4d_huge(*p4d));
-	if (unlikely(p4d_bad(*p4d)))
-		return no_page_table(vma, flags);
-	pud = pud_offset(p4d, address);
-	if (pud_none(*pud))
-		return no_page_table(vma, flags);
-	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
-		page = follow_huge_pud(mm, address, pud, flags);
-		if (page)
-			return page;
-		return no_page_table(vma, flags);
-	}
-	if (pud_devmap(*pud)) {
-		ptl = pud_lock(mm, pud);
-		page = follow_devmap_pud(vma, address, pud, flags);
-		spin_unlock(ptl);
-		if (page)
-			return page;
-	}
-	if (unlikely(pud_bad(*pud)))
-		return no_page_table(vma, flags);
-
-	pmd = pmd_offset(pud, address);
+	pmd = pmd_offset(pudp, address);
 	if (pmd_none(*pmd))
 		return no_page_table(vma, flags);
 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
@@ -319,13 +267,99 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 		return ret ? ERR_PTR(ret) :
 			follow_page_pte(vma, address, pmd, flags);
 	}
-
 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 	spin_unlock(ptl);
 	*page_mask = HPAGE_PMD_NR - 1;
 	return page;
 }
 
+static struct page *follow_pud_mask(struct vm_area_struct *vma,
+				    unsigned long address, p4d_t *p4dp,
+				    unsigned int flags, unsigned int *page_mask)
+{
+	pud_t *pud;
+	spinlock_t *ptl;
+	struct page *page;
+	struct mm_struct *mm = vma->vm_mm;
+
+	pud = pud_offset(p4dp, address);
+	if (pud_none(*pud))
+		return no_page_table(vma, flags);
+	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+		page = follow_huge_pud(mm, address, pud, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
+	if (pud_devmap(*pud)) {
+		ptl = pud_lock(mm, pud);
+		page = follow_devmap_pud(vma, address, pud, flags);
+		spin_unlock(ptl);
+		if (page)
+			return page;
+	}
+	if (unlikely(pud_bad(*pud)))
+		return no_page_table(vma, flags);
+
+	return follow_pmd_mask(vma, address, pud, flags, page_mask);
+}
+
+static struct page *follow_p4d_mask(struct vm_area_struct *vma,
+				    unsigned long address, pgd_t *pgdp,
+				    unsigned int flags, unsigned int *page_mask)
+{
+	p4d_t *p4d;
+
+	p4d = p4d_offset(pgdp, address);
+	if (p4d_none(*p4d))
+		return no_page_table(vma, flags);
+	BUILD_BUG_ON(p4d_huge(*p4d));
+	if (unlikely(p4d_bad(*p4d)))
+		return no_page_table(vma, flags);
+
+	return follow_pud_mask(vma, address, p4d, flags, page_mask);
+}
+
+/**
+ * follow_page_mask - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
+ */
+struct page *follow_page_mask(struct vm_area_struct *vma,
+			      unsigned long address, unsigned int flags,
+			      unsigned int *page_mask)
+{
+	pgd_t *pgd;
+	struct page *page;
+	struct mm_struct *mm = vma->vm_mm;
+
+	*page_mask = 0;
+
+	/* make this handle hugepd */
+	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+	if (!IS_ERR(page)) {
+		BUG_ON(flags & FOLL_GET);
+		return page;
+	}
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		return no_page_table(vma, flags);
+
+	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
+}
+
 static int get_gate_page(struct mm_struct *mm, unsigned long address,
 		unsigned int gup_flags, struct vm_area_struct **vma,
 		struct page **page)