Commit e388466d authored by Kirill A. Shutemov's avatar Kirill A. Shutemov Committed by Linus Torvalds

mm: make remove_migration_ptes() available beyond mm/migration.c

Make remove_migration_ptes() available to be used in split_huge_page().

New parameter 'locked' added: as with try_to_unmap() we need a way to
indicate that the caller holds the rmap lock.

We also shouldn't try to mlock() pte-mapped huge pages: pte-mapped THP
pages are never mlocked.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2a52bcbc
...@@ -243,6 +243,8 @@ int page_mkclean(struct page *); ...@@ -243,6 +243,8 @@ int page_mkclean(struct page *);
*/ */
int try_to_munlock(struct page *); int try_to_munlock(struct page *);
void remove_migration_ptes(struct page *old, struct page *new, bool locked);
/* /*
* Called by memory-failure.c to kill processes. * Called by memory-failure.c to kill processes.
*/ */
......
...@@ -172,7 +172,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, ...@@ -172,7 +172,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
else else
page_add_file_rmap(new); page_add_file_rmap(new);
if (vma->vm_flags & VM_LOCKED) if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new); mlock_vma_page(new);
/* No need to invalidate - it was non-present before */ /* No need to invalidate - it was non-present before */
...@@ -187,13 +187,16 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, ...@@ -187,13 +187,16 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
* Get rid of all migration entries and replace them by * Get rid of all migration entries and replace them by
* references to the indicated page. * references to the indicated page.
*/ */
static void remove_migration_ptes(struct page *old, struct page *new) void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{ {
struct rmap_walk_control rwc = { struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte, .rmap_one = remove_migration_pte,
.arg = old, .arg = old,
}; };
if (locked)
rmap_walk_locked(new, &rwc);
else
rmap_walk(new, &rwc); rmap_walk(new, &rwc);
} }
...@@ -702,7 +705,7 @@ static int writeout(struct address_space *mapping, struct page *page) ...@@ -702,7 +705,7 @@ static int writeout(struct address_space *mapping, struct page *page)
* At this point we know that the migration attempt cannot * At this point we know that the migration attempt cannot
* be successful. * be successful.
*/ */
remove_migration_ptes(page, page); remove_migration_ptes(page, page, false);
rc = mapping->a_ops->writepage(page, &wbc); rc = mapping->a_ops->writepage(page, &wbc);
...@@ -900,7 +903,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, ...@@ -900,7 +903,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
if (page_was_mapped) if (page_was_mapped)
remove_migration_ptes(page, remove_migration_ptes(page,
rc == MIGRATEPAGE_SUCCESS ? newpage : page); rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
out_unlock_both: out_unlock_both:
unlock_page(newpage); unlock_page(newpage);
...@@ -1070,7 +1073,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1070,7 +1073,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (page_was_mapped) if (page_was_mapped)
remove_migration_ptes(hpage, remove_migration_ptes(hpage,
rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage); rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
unlock_page(new_hpage); unlock_page(new_hpage);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment