Commit a98a2f0c authored by Alistair Popple's avatar Alistair Popple Committed by Linus Torvalds

mm/rmap: split migration into its own function

Migration is currently implemented as a mode of operation for
try_to_unmap_one() generally specified by passing the TTU_MIGRATION flag
or in the case of splitting a huge anonymous page TTU_SPLIT_FREEZE.

However it does not have much in common with the rest of the unmap
functionality of try_to_unmap_one() and thus splitting it into a separate
function reduces the complexity of try_to_unmap_one() making it more
readable.

Several simplifications can also be made in try_to_migrate_one() based on
the following observations:

 - All users of TTU_MIGRATION also set TTU_IGNORE_MLOCK.
 - No users of TTU_MIGRATION ever set TTU_IGNORE_HWPOISON.
 - No users of TTU_MIGRATION ever set TTU_BATCH_FLUSH.

TTU_SPLIT_FREEZE is a special case of migration used when splitting an
anonymous page.  This is most easily dealt with by calling the correct
function from unmap_page() in mm/huge_memory.c - either try_to_migrate()
for PageAnon or try_to_unmap().

Link: https://lkml.kernel.org/r/20210616105937.23201-5-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cd62734c
...@@ -86,8 +86,6 @@ struct anon_vma_chain { ...@@ -86,8 +86,6 @@ struct anon_vma_chain {
}; };
enum ttu_flags { enum ttu_flags {
TTU_MIGRATION = 0x1, /* migration mode */
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */ TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */ TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
...@@ -97,7 +95,6 @@ enum ttu_flags { ...@@ -97,7 +95,6 @@ enum ttu_flags {
* do a final flush if necessary */ * do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock: TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */ * caller holds it */
TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
}; };
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
...@@ -194,6 +191,7 @@ static inline void page_dup_rmap(struct page *page, bool compound) ...@@ -194,6 +191,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
int page_referenced(struct page *, int is_locked, int page_referenced(struct page *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags); struct mem_cgroup *memcg, unsigned long *vm_flags);
void try_to_migrate(struct page *page, enum ttu_flags flags);
void try_to_unmap(struct page *, enum ttu_flags flags); void try_to_unmap(struct page *, enum ttu_flags flags);
/* Avoid racy checks */ /* Avoid racy checks */
......
...@@ -2309,16 +2309,20 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, ...@@ -2309,16 +2309,20 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_page(struct page *page) static void unmap_page(struct page *page)
{ {
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC | enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD; TTU_SYNC;
VM_BUG_ON_PAGE(!PageHead(page), page); VM_BUG_ON_PAGE(!PageHead(page), page);
/* If TTU_SPLIT_FREEZE is ever extended to file, update remap_page() */ /*
* Anon pages need migration entries to preserve them, but file
* pages can simply be left unmapped, then faulted back on demand.
* If that is ever changed (perhaps for mlock), update remap_page().
*/
if (PageAnon(page)) if (PageAnon(page))
ttu_flags |= TTU_SPLIT_FREEZE; try_to_migrate(page, ttu_flags);
else
try_to_unmap(page, ttu_flags); try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK);
VM_WARN_ON_ONCE_PAGE(page_mapped(page), page); VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
} }
......
...@@ -1109,7 +1109,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, ...@@ -1109,7 +1109,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
/* Establish migration ptes */ /* Establish migration ptes */
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page); page);
try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK); try_to_migrate(page, 0);
page_was_mapped = 1; page_was_mapped = 1;
} }
...@@ -1311,7 +1311,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1311,7 +1311,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (page_mapped(hpage)) { if (page_mapped(hpage)) {
bool mapping_locked = false; bool mapping_locked = false;
enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK; enum ttu_flags ttu = 0;
if (!PageAnon(hpage)) { if (!PageAnon(hpage)) {
/* /*
...@@ -1328,7 +1328,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1328,7 +1328,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
ttu |= TTU_RMAP_LOCKED; ttu |= TTU_RMAP_LOCKED;
} }
try_to_unmap(hpage, ttu); try_to_migrate(hpage, ttu);
page_was_mapped = 1; page_was_mapped = 1;
if (mapping_locked) if (mapping_locked)
...@@ -2602,7 +2602,6 @@ static void migrate_vma_prepare(struct migrate_vma *migrate) ...@@ -2602,7 +2602,6 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
*/ */
static void migrate_vma_unmap(struct migrate_vma *migrate) static void migrate_vma_unmap(struct migrate_vma *migrate)
{ {
int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;
const unsigned long npages = migrate->npages; const unsigned long npages = migrate->npages;
const unsigned long start = migrate->start; const unsigned long start = migrate->start;
unsigned long addr, i, restore = 0; unsigned long addr, i, restore = 0;
...@@ -2614,7 +2613,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate) ...@@ -2614,7 +2613,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
continue; continue;
if (page_mapped(page)) { if (page_mapped(page)) {
try_to_unmap(page, flags); try_to_migrate(page, 0);
if (page_mapped(page)) if (page_mapped(page))
goto restore; goto restore;
} }
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment