Commit 3d680bdf authored by Qian Cai's avatar Qian Cai Committed by Linus Torvalds

mm/page_isolation: fix potential warning from user

It makes sense to call the WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE)
from start_isolate_page_range(), but should avoid triggering it from
userspace, i.e., from is_mem_section_removable(), because it could crash
the system by a non-root user if panic_on_warn is set.

While at it, simplify the code a bit by removing an unnecessary jump
label.

Link: http://lkml.kernel.org/r/20200120163915.1469-1-cai@lca.pw
Signed-off-by: Qian Cai <cai@lca.pw>
Suggested-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4a55c047
...@@ -8214,7 +8214,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, ...@@ -8214,7 +8214,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
if (is_migrate_cma(migratetype)) if (is_migrate_cma(migratetype))
return NULL; return NULL;
goto unmovable; return page;
} }
for (; iter < pageblock_nr_pages; iter++) { for (; iter < pageblock_nr_pages; iter++) {
...@@ -8224,7 +8224,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, ...@@ -8224,7 +8224,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
page = pfn_to_page(pfn + iter); page = pfn_to_page(pfn + iter);
if (PageReserved(page)) if (PageReserved(page))
goto unmovable; return page;
/* /*
* If the zone is movable and we have ruled out all reserved * If the zone is movable and we have ruled out all reserved
...@@ -8244,7 +8244,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, ...@@ -8244,7 +8244,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
unsigned int skip_pages; unsigned int skip_pages;
if (!hugepage_migration_supported(page_hstate(head))) if (!hugepage_migration_supported(page_hstate(head)))
goto unmovable; return page;
skip_pages = compound_nr(head) - (page - head); skip_pages = compound_nr(head) - (page - head);
iter += skip_pages - 1; iter += skip_pages - 1;
...@@ -8286,12 +8286,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, ...@@ -8286,12 +8286,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
* is set to both of a memory hole page and a _used_ kernel * is set to both of a memory hole page and a _used_ kernel
* page at boot. * page at boot.
*/ */
goto unmovable; return page;
} }
return NULL; return NULL;
unmovable:
WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
return pfn_to_page(pfn + iter);
} }
#ifdef CONFIG_CONTIG_ALLOC #ifdef CONFIG_CONTIG_ALLOC
......
...@@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ ...@@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
out: out:
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
if (!ret) if (!ret) {
drain_all_pages(zone); drain_all_pages(zone);
else if ((isol_flags & REPORT_FAILURE) && unmovable) } else {
/* WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
* printk() with zone->lock held will guarantee to trigger a
* lockdep splat, so defer it here. if ((isol_flags & REPORT_FAILURE) && unmovable)
*/ /*
dump_page(unmovable, "unmovable page"); * printk() with zone->lock held will likely trigger a
* lockdep splat, so defer it here.
*/
dump_page(unmovable, "unmovable page");
}
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment