Commit f9f38f78 authored by Christoph Hellwig, committed by Matthew Wilcox (Oracle)

mm: refactor check_and_migrate_movable_pages

Remove up to two levels of indentation by using continue statements
and move variables to local scope where possible.
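
As a purely illustrative sketch of those two techniques (hypothetical struct item and
needs_work()/do_work() helpers, not the gup.c code): inverting the test and continuing
early removes a nesting level, and a variable used only per iteration is declared
inside the loop:

    #include <stdbool.h>

    struct item { bool dirty; };

    static bool needs_work(struct item *it) { return it->dirty; }
    static void do_work(struct item *it)    { it->dirty = false; }

    static void process_items(struct item **items, unsigned long nr)
    {
            unsigned long i;

            for (i = 0; i < nr; i++) {
                    /* declared in the narrowest scope that needs it */
                    struct item *cur = items[i];

                    /* early continue instead of wrapping the rest in an if */
                    if (!needs_work(cur))
                            continue;
                    do_work(cur);
            }
    }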

Link: https://lkml.kernel.org/r/20220210072828.2930359-11-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: "Sierra Guiza, Alejandro (Alex)" <alex.sierra@amd.com>

Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Chaitanya Kulkarni <kch@nvidia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Karol Herbst <kherbst@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: "Pan, Xinhui" <Xinhui.Pan@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent 5cbf9942
@@ -1841,32 +1841,31 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
                                             struct page **pages,
                                             unsigned int gup_flags)
 {
-        unsigned long i;
-        unsigned long isolation_error_count = 0;
-        bool drain_allow = true;
-        LIST_HEAD(movable_page_list);
-        long ret = 0;
+        unsigned long isolation_error_count = 0, i;
         struct page *prev_head = NULL;
-        struct page *head;
-        struct migration_target_control mtc = {
-                .nid = NUMA_NO_NODE,
-                .gfp_mask = GFP_USER | __GFP_NOWARN,
-        };
+        LIST_HEAD(movable_page_list);
+        bool drain_allow = true;
+        int ret = 0;
 
         for (i = 0; i < nr_pages; i++) {
-                head = compound_head(pages[i]);
+                struct page *head = compound_head(pages[i]);
+
                 if (head == prev_head)
                         continue;
                 prev_head = head;
+
+                if (is_pinnable_page(head))
+                        continue;
+
                 /*
-                 * If we get a movable page, since we are going to be pinning
-                 * these entries, try to move them out if possible.
+                 * Try to move out any movable page before pinning the range.
                  */
-                if (!is_pinnable_page(head)) {
-                        if (PageHuge(head)) {
-                                if (!isolate_huge_page(head, &movable_page_list))
-                                        isolation_error_count++;
-                        } else {
-                                if (!PageLRU(head) && drain_allow) {
-                                        lru_add_drain_all();
-                                        drain_allow = false;
+                if (PageHuge(head)) {
+                        if (!isolate_huge_page(head, &movable_page_list))
+                                isolation_error_count++;
+                        continue;
+                }
+
+                if (!PageLRU(head) && drain_allow) {
+                        lru_add_drain_all();
+                        drain_allow = false;
@@ -1878,35 +1877,43 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
-                                }
-                                list_add_tail(&head->lru, &movable_page_list);
-                                mod_node_page_state(page_pgdat(head),
-                                                    NR_ISOLATED_ANON +
-                                                    page_is_file_lru(head),
-                                                    thp_nr_pages(head));
-                        }
-                }
+                }
+                list_add_tail(&head->lru, &movable_page_list);
+                mod_node_page_state(page_pgdat(head),
+                                    NR_ISOLATED_ANON + page_is_file_lru(head),
+                                    thp_nr_pages(head));
         }
 
+        if (!list_empty(&movable_page_list) || isolation_error_count)
+                goto unpin_pages;
+
         /*
          * If list is empty, and no isolation errors, means that all pages are
          * in the correct zone.
          */
-        if (list_empty(&movable_page_list) && !isolation_error_count)
-                return nr_pages;
+        return nr_pages;
 
+unpin_pages:
         if (gup_flags & FOLL_PIN) {
                 unpin_user_pages(pages, nr_pages);
         } else {
                 for (i = 0; i < nr_pages; i++)
                         put_page(pages[i]);
         }
 
         if (!list_empty(&movable_page_list)) {
+                struct migration_target_control mtc = {
+                        .nid = NUMA_NO_NODE,
+                        .gfp_mask = GFP_USER | __GFP_NOWARN,
+                };
+
                 ret = migrate_pages(&movable_page_list, alloc_migration_target,
                                     NULL, (unsigned long)&mtc, MIGRATE_SYNC,
                                     MR_LONGTERM_PIN, NULL);
-                if (ret && !list_empty(&movable_page_list))
-                        putback_movable_pages(&movable_page_list);
+                if (ret > 0) /* number of pages not migrated */
+                        ret = -ENOMEM;
         }
-        return ret > 0 ? -ENOMEM : ret;
+
+        if (ret && !list_empty(&movable_page_list))
+                putback_movable_pages(&movable_page_list);
+        return ret;
 }
 #else
 static long check_and_migrate_movable_pages(unsigned long nr_pages,
...