Commit 248db92d authored by Kirill A. Shutemov, committed by Linus Torvalds

migrate_pages: try to split pages on queuing

We are not able to migrate THPs.  This means it is not enough to split only the
PMD on migration -- we need to split the compound page under it too.
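
For illustration, a minimal userspace sketch of the path this patch affects:
mbind() with MPOL_MF_MOVE on a THP-backed anonymous range, which walks
queue_pages_pte_range().  This sketch is not part of the commit; the target
node and region size are assumptions for a machine with at least two NUMA
nodes and transparent hugepages enabled.

	#include <numaif.h>	/* mbind(), MPOL_*; link with -lnuma */
	#include <sys/mman.h>
	#include <string.h>

	int main(void)
	{
		size_t len = 4UL << 20;			/* 4MB: room for THPs */
		unsigned long nodemask = 1UL << 1;	/* node 1, assumed to exist */
		char *buf;

		buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED)
			return 1;
		madvise(buf, len, MADV_HUGEPAGE);	/* allow THP for this range */
		memset(buf, 0x5a, len);			/* fault the pages in */

		/*
		 * Queue the range for migration to node 1.  With this patch,
		 * queue_pages_pte_range() splits any THP it encounters so the
		 * base pages can be added to the migration list.
		 */
		if (mbind(buf, len, MPOL_BIND, &nodemask,
			  8 * sizeof(nodemask), MPOL_MF_MOVE))
			return 1;
		return 0;
	}
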
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e9b61f19
@@ -489,14 +489,33 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid;
+	int nid, ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_pmd(vma, pmd, addr);
-	if (pmd_trans_unstable(pmd))
-		return 0;
+	if (pmd_trans_huge(*pmd)) {
+		ptl = pmd_lock(walk->mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			page = pmd_page(*pmd);
+			if (is_huge_zero_page(page)) {
+				spin_unlock(ptl);
+				split_huge_pmd(vma, pmd, addr);
+			} else {
+				get_page(page);
+				spin_unlock(ptl);
+				lock_page(page);
+				ret = split_huge_page(page);
+				unlock_page(page);
+				put_page(page);
+				if (ret)
+					return 0;
+			}
+		} else {
+			spin_unlock(ptl);
+		}
+	}
 
+retry:
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
@@ -513,6 +532,21 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		nid = page_to_nid(page);
 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
 			continue;
+		if (PageTail(page) && PageAnon(page)) {
+			get_page(page);
+			pte_unmap_unlock(pte, ptl);
+			lock_page(page);
+			ret = split_huge_page(page);
+			unlock_page(page);
+			put_page(page);
+			/* Failed to split -- skip. */
+			if (ret) {
+				pte = pte_offset_map_lock(walk->mm, pmd,
+						addr, &ptl);
+				continue;
+			}
+			goto retry;
+		}
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 			migrate_page_add(page, qp->pagelist, flags);
...