Commit e2d8cf40 authored by Naoya Horiguchi, committed by Linus Torvalds

migrate: add hugepage migration code to migrate_pages()

Extend check_range() to handle vmas with VM_HUGETLB set.  We will be able
to migrate hugepages with migrate_pages(2) after applying the enablement
patch which comes later in this series.
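
For reference, a minimal userspace sketch of what such a migration could
look like once the full series is applied, using the migrate_pages(2)
wrapper from libnuma's <numaif.h> (compile with -lnuma); the node numbers
here are illustrative assumptions, not part of this patch:

#define _GNU_SOURCE
#include <stdio.h>
#include <numaif.h>	/* libnuma wrapper for the migrate_pages(2) syscall */

int main(void)
{
	/* Node masks: bit i selects NUMA node i (a two-node machine is assumed). */
	unsigned long old_nodes = 1UL << 0;	/* migrate pages away from node 0 */
	unsigned long new_nodes = 1UL << 1;	/* ...and place them on node 1 */

	/*
	 * pid 0 means the calling process; the second argument is the
	 * number of bits in each node mask.
	 */
	long ret = migrate_pages(0, 8 * sizeof(unsigned long),
				 &old_nodes, &new_nodes);
	if (ret < 0)
		perror("migrate_pages");
	else
		printf("pages that could not be moved: %ld\n", ret);
	return 0;
}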

Note that larger hugepages (those covered by pud entries, 1GB on x86_64 for
example) are simply skipped for now.

Note that using pmd_huge/pud_huge assumes that hugepages are pointed to by
pmd/pud entries.  This is not true on some architectures that implement
hugepages with other mechanisms, such as ia64, but that's OK because
pmd_huge/pud_huge simply return 0 on such architectures, so the page
walker simply ignores those hugepages.
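
For illustration only, such an architecture could satisfy the interface
with stubs along these lines (a hypothetical sketch, not the actual ia64
source):

/*
 * Hypothetical stubs for an architecture whose hugepages are not
 * represented by pmd/pud entries: both checks always fail, so the
 * hugetlb branches added in this patch are never taken there.
 */
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}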

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b8ec1cee
mm/mempolicy.c

@@ -515,6 +515,30 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	return addr != end;
 }
 
+static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
+		const nodemask_t *nodes, unsigned long flags,
+		void *private)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	int nid;
+	struct page *page;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	page = pte_page(huge_ptep_get((pte_t *)pmd));
+	nid = page_to_nid(page);
+	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+		goto unlock;
+	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+	if (flags & (MPOL_MF_MOVE_ALL) ||
+	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
+		isolate_huge_page(page, private);
+unlock:
+	spin_unlock(&vma->vm_mm->page_table_lock);
+#else
+	BUG();
+#endif
+}
+
 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags,
@@ -526,6 +550,13 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (!pmd_present(*pmd))
+			continue;
+		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
+			check_hugetlb_pmd_range(vma, pmd, nodes,
+						flags, private);
+			continue;
+		}
 		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
@@ -547,6 +578,8 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
+		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
+			continue;
 		if (pud_none_or_clear_bad(pud))
 			continue;
 		if (check_pmd_range(vma, pud, addr, next, nodes,
@@ -638,9 +671,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 			return ERR_PTR(-EFAULT);
 		}
 
-		if (is_vm_hugetlb_page(vma))
-			goto next;
-
 		if (flags & MPOL_MF_LAZY) {
 			change_prot_numa(vma, start, endvma);
 			goto next;
@@ -993,6 +1023,10 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
+	if (PageHuge(page))
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					node);
+	else
+		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
@@ -1023,7 +1057,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 		err = migrate_pages(&pagelist, new_node_page, dest,
 					MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 	return err;