Commit f68749ec authored by Pavel Tatashin, committed by Linus Torvalds

mm/gup: longterm pin migration cleanup

When pages are longterm pinned, we must migrate them out of the movable
zone.  The function that migrates them has a hidden loop with a goto: the
loop retries on isolation failures, and again after a successful migration.

Make this code better by moving this loop to the caller.

Link: https://lkml.kernel.org/r/20210215161349.246722-13-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 24dc20c7
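
To make the control-flow change easier to follow before reading the diff, here is a minimal, self-contained C sketch of the before/after shape. Every name in it (pages_movable, migrate_and_unpin, and the two check_and_migrate_* variants) is invented for illustration; this is not kernel code, only the retry-loop pattern the patch moves out of the callee and into the caller.

#include <stdio.h>

/* Toy state so the example terminates: pretend two passes are needed. */
static int passes_left = 2;

static int pages_movable(void) { return passes_left > 0; }
static void migrate_and_unpin(void) { passes_left--; }

/* Old shape: the retry loop hides inside the callee behind a goto. */
static long check_and_migrate_old(long nr_pages)
{
check_again:
	if (pages_movable()) {
		migrate_and_unpin();
		/* ...re-pin the pages here, then... */
		goto check_again;	/* the hidden loop */
	}
	return nr_pages;
}

/* New shape: a single pass; returning 0 asks the caller to retry. */
static long check_and_migrate_new(long nr_pages)
{
	if (pages_movable()) {
		migrate_and_unpin();
		return 0;		/* pages unpinned: pin and call again */
	}
	return nr_pages;		/* all pages already pinnable: done */
}

int main(void)
{
	long rc;

	passes_left = 2;
	printf("old shape: %ld\n", check_and_migrate_old(4));

	passes_left = 2;
	do {				/* the loop is now explicit here */
		rc = check_and_migrate_new(4);
	} while (!rc);
	printf("new shape: %ld\n", rc);
	return 0;
}

The point of the new shape is that the callee does a single pass and reports the outcome through its return value, so the caller can see exactly when and why pinning is retried.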
@@ -1602,27 +1602,28 @@ struct page *get_dump_page(unsigned long addr)
 #endif /* CONFIG_ELF_CORE */
 
 #ifdef CONFIG_MIGRATION
-static long check_and_migrate_movable_pages(struct mm_struct *mm,
-					    unsigned long start,
-					    unsigned long nr_pages,
+/*
+ * Check whether all pages are pinnable, if so return number of pages.  If some
+ * pages are not pinnable, migrate them, and unpin all pages. Return zero if
+ * pages were migrated, or if some pages were not successfully isolated.
+ * Return negative error if migration fails.
+ */
+static long check_and_migrate_movable_pages(unsigned long nr_pages,
 					    struct page **pages,
 					    struct vm_area_struct **vmas,
 					    unsigned int gup_flags)
 {
-	unsigned long i, isolation_error_count;
-	bool drain_allow;
+	unsigned long i;
+	unsigned long isolation_error_count = 0;
+	bool drain_allow = true;
 	LIST_HEAD(movable_page_list);
-	long ret = nr_pages;
-	struct page *prev_head, *head;
+	long ret = 0;
+	struct page *prev_head = NULL;
+	struct page *head;
 	struct migration_target_control mtc = {
 		.nid = NUMA_NO_NODE,
 		.gfp_mask = GFP_USER | __GFP_NOWARN,
 	};
 
-check_again:
-	prev_head = NULL;
-	isolation_error_count = 0;
-	drain_allow = true;
 	for (i = 0; i < nr_pages; i++) {
 		head = compound_head(pages[i]);
 		if (head == prev_head)
@@ -1660,47 +1661,27 @@ static long check_and_migrate_movable_pages(struct mm_struct *mm,
 	 * in the correct zone.
 	 */
 	if (list_empty(&movable_page_list) && !isolation_error_count)
-		return ret;
+		return nr_pages;
+
+	if (gup_flags & FOLL_PIN) {
+		unpin_user_pages(pages, nr_pages);
+	} else {
+		for (i = 0; i < nr_pages; i++)
+			put_page(pages[i]);
+	}
 
 	if (!list_empty(&movable_page_list)) {
-		/*
-		 * drop the above get_user_pages reference.
-		 */
-		if (gup_flags & FOLL_PIN)
-			unpin_user_pages(pages, nr_pages);
-		else
-			for (i = 0; i < nr_pages; i++)
-				put_page(pages[i]);
-
 		ret = migrate_pages(&movable_page_list, alloc_migration_target,
 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
 				    MR_LONGTERM_PIN);
-		if (ret) {
-			if (!list_empty(&movable_page_list))
-				putback_movable_pages(&movable_page_list);
-			return ret > 0 ? -ENOMEM : ret;
-		}
-
-		/* We unpinned pages before migration, pin them again */
-		ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
-					      NULL, gup_flags);
-		if (ret <= 0)
-			return ret;
-		nr_pages = ret;
+		if (ret && !list_empty(&movable_page_list))
+			putback_movable_pages(&movable_page_list);
 	}
 
-	/*
-	 * check again because pages were unpinned, and we also might have
-	 * had isolation errors and need more pages to migrate.
-	 */
-	goto check_again;
+	return ret > 0 ? -ENOMEM : ret;
 }
 #else
-static long check_and_migrate_movable_pages(struct mm_struct *mm,
-					    unsigned long start,
-					    unsigned long nr_pages,
+static long check_and_migrate_movable_pages(unsigned long nr_pages,
 					    struct page **pages,
 					    struct vm_area_struct **vmas,
 					    unsigned int gup_flags)
 {
 	return nr_pages;
@@ -1718,22 +1699,22 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 				  struct vm_area_struct **vmas,
 				  unsigned int gup_flags)
 {
-	unsigned long flags = 0;
+	unsigned int flags;
 	long rc;
 
-	if (gup_flags & FOLL_LONGTERM)
-		flags = memalloc_pin_save();
-
-	rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
-				     gup_flags);
+	if (!(gup_flags & FOLL_LONGTERM))
+		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+					       NULL, gup_flags);
 
-	if (gup_flags & FOLL_LONGTERM) {
-		if (rc > 0)
-			rc = check_and_migrate_movable_pages(mm, start, rc,
-							     pages, vmas,
-							     gup_flags);
-		memalloc_pin_restore(flags);
-	}
+	flags = memalloc_pin_save();
+	do {
+		rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+					     NULL, gup_flags);
+		if (rc <= 0)
+			break;
+		rc = check_and_migrate_movable_pages(rc, pages, vmas,
+						     gup_flags);
+	} while (!rc);
+	memalloc_pin_restore(flags);
 
 	return rc;
 }
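
One detail of the new return contract deserves a note: migrate_pages() returns 0 on full success, a positive count of pages it could not migrate, or a negative errno, and the final `return ret > 0 ? -ENOMEM : ret;` collapses the positive case into a plain error. A small userspace mock of just that mapping (the mock_* names are hypothetical; this is not the kernel API):

#include <errno.h>
#include <stdio.h>

/*
 * Stand-in for migrate_pages(): 0 means full success, a positive value
 * is the count of pages left unmigrated, a negative value is an errno.
 * Hypothetical mock for illustration only.
 */
static long mock_migrate_pages(long outcome)
{
	return outcome;
}

int main(void)
{
	long cases[] = { 0, 3, -EAGAIN };	/* success, partial, error */
	unsigned long i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		long ret = mock_migrate_pages(cases[i]);

		/*
		 * Same mapping as the patch: a positive leftover count
		 * becomes -ENOMEM, errnos pass through, and 0 stays 0 so
		 * the caller's loop re-pins and checks again.
		 */
		ret = ret > 0 ? -ENOMEM : ret;
		printf("migrate_pages -> %ld, caller sees %ld\n",
		       cases[i], ret);
	}
	return 0;
}

With this mapping, the do/while in __gup_longterm_locked() only ever sees three outcomes: a positive page count (done), zero (pages were migrated and unpinned, so pin again), or a negative errno to propagate.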