Commit 0ca1634d authored by Johannes Weiner, committed by Linus Torvalds

thp: mincore transparent hugepage support

Handle transparent huge page pmd entries natively instead of splitting
them into subpages.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f2d6bfe9
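
For context, a minimal userspace sketch of the behavior this commit affects (not part of the commit itself): after the change, mincore() on a THP-backed range can fill its vector from a single huge pmd instead of splitting the huge page first. The 2MB huge page size, 4K base page size, and MADV_HUGEPAGE hint below are illustrative assumptions.

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t huge = 2UL << 20;	/* assumed huge page size: 2MB */
	unsigned char vec[512];		/* assumed 4K base pages: 2MB / 4K = 512 */
	char *raw, *buf;

	/* Over-allocate so a 2MB-aligned region can be carved out. */
	raw = mmap(NULL, 2 * huge, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED)
		return 1;
	buf = (char *)(((uintptr_t)raw + huge - 1) & ~(uintptr_t)(huge - 1));

	madvise(buf, huge, MADV_HUGEPAGE);	/* hint: back this range with THP */
	memset(buf, 1, huge);			/* fault the range in */

	if (mincore(buf, huge, vec))
		return 1;
	/* With a huge pmd backing the range, every vector byte reads as resident. */
	printf("resident: first=%d last=%d\n", vec[0], vec[511]);
	return 0;
}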
include/linux/huge_mm.h
@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
 			pmd_t *pmd);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
mm/huge_memory.c
@@ -923,6 +923,31 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return ret;
 }
 
+int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, unsigned long end,
+		unsigned char *vec)
+{
+	int ret = 0;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		ret = !pmd_trans_splitting(*pmd);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		if (unlikely(!ret))
+			wait_split_huge_page(vma->anon_vma, pmd);
+		else {
+			/*
+			 * All logical pages in the range are present
+			 * if backed by a huge page.
+			 */
+			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
 			      struct mm_struct *mm,
 			      unsigned long address,
mm/mincore.c
@@ -154,7 +154,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
+				vec += (next - addr) >> PAGE_SHIFT;
+				continue;
+			}
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			mincore_unmapped_range(vma, addr, next, vec);
 		else
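
For concreteness, the vector bookkeeping in the hunk above works out as follows (a hedged arithmetic sketch, assuming x86-64 defaults of PAGE_SHIFT = 12 and a 2MB huge pmd; the addresses are hypothetical):

/* Illustrative values only (assumed x86-64: 4K base pages, 2MB pmd). */
unsigned long addr = 0x200000;			/* hypothetical pmd-aligned start */
unsigned long next = 0x400000;			/* pmd_addr_end(): addr + 2MB */
unsigned long entries = (next - addr) >> 12;	/* 2MB >> PAGE_SHIFT = 512 */

/*
 * When mincore_huge_pmd() returns 1, it has already memset 512 bytes
 * of vec to 1 for the huge page, so mincore_pmd_range() advances vec
 * by the same 512 entries and continues with the next pmd. A return
 * of 0 means the pmd was splitting (after waiting) or not huge, and
 * the walk falls through to the regular pte-level handling.
 */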