Commit 4da2ce25 authored by Michal Hocko, committed by Linus Torvalds

mm: distinguish CMA and MOVABLE isolation in has_unmovable_pages()

Joonsoo has noticed that "mm: drop migrate type checks from
has_unmovable_pages" would break the CMA allocator, because CMA relies
on has_unmovable_pages returning false even for CMA pageblocks, which
in fact don't have to be movable:

 alloc_contig_range
   start_isolate_page_range
     set_migratetype_isolate
       has_unmovable_pages

This is a result of the code sharing between CMA and memory hotplug,
even though each has a different idea of what has_unmovable_pages
should return.  This is unfortunate, but fixing it properly would
require a lot of code duplication.

Fix the issue by introducing an argument for the requested migrate type
and special-casing MIGRATE_CMA, so that CMA page blocks are handled
properly.  This also works for memory hotplug, which requires
MIGRATE_MOVABLE.

Link: http://lkml.kernel.org/r/20171019122118.y6cndierwl2vnguj@dhcp22.suse.cz
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Tested-by: Ran Wang <ran.wang_1@nxp.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d7b236e1
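
To make the new semantics concrete, here is a minimal userspace model of the check the patch adds. The enum and helpers below are simplified stand-ins, not the kernel's real API:

/*
 * Simplified model: "caller" is the migratetype requested by the
 * isolation caller (MIGRATE_CMA for alloc_contig_range, MIGRATE_MOVABLE
 * for memory hotplug); "block" is the pageblock's current migratetype.
 */
#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_CMA, MIGRATE_ISOLATE };

static bool is_migrate_cma(enum migratetype mt)
{
	return mt == MIGRATE_CMA;
}

static bool block_counts_as_movable(enum migratetype caller,
				    enum migratetype block)
{
	/*
	 * A CMA caller may isolate CMA pageblocks even when their
	 * contents are not literally movable; any other caller must not.
	 */
	if (is_migrate_cma(caller) && is_migrate_cma(block))
		return true;
	return false;	/* the kernel falls through to a per-page scan */
}

int main(void)
{
	/* CMA isolating a CMA block: short-circuits to "movable" (1) */
	printf("%d\n", block_counts_as_movable(MIGRATE_CMA, MIGRATE_CMA));
	/* hotplug looking at a CMA block: must scan the pages instead (0) */
	printf("%d\n", block_counts_as_movable(MIGRATE_MOVABLE, MIGRATE_CMA));
	return 0;
}

A MIGRATE_CMA request short-circuits on CMA pageblocks, while a MIGRATE_MOVABLE (hotplug) request falls through to the per-page scan, preserving the old, stricter behaviour.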
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -31,7 +31,7 @@ static inline bool is_migrate_isolate(int migratetype)
 #endif
 
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 bool skip_hwpoisoned_pages);
+			 int migratetype, bool skip_hwpoisoned_pages);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
 			int migratetype, int *num_movable);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7353,6 +7353,7 @@ void *__init alloc_large_system_hash(const char *tablename,
  * race condition. So you can't expect this function should be exact.
  */
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
+			 int migratetype,
 			 bool skip_hwpoisoned_pages)
 {
 	unsigned long pfn, iter, found;
@@ -7364,6 +7365,15 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 	if (zone_idx(zone) == ZONE_MOVABLE)
 		return false;
 
+	/*
+	 * CMA allocations (alloc_contig_range) really need to mark isolate
+	 * CMA pageblocks even when they are not movable in fact so consider
+	 * them movable here.
+	 */
+	if (is_migrate_cma(migratetype) &&
+			is_migrate_cma(get_pageblock_migratetype(page)))
+		return false;
+
 	pfn = page_to_pfn(page);
 	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
 		unsigned long check = pfn + iter;
@@ -7446,7 +7456,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
-	return !has_unmovable_pages(zone, page, 0, true);
+	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
 }
 
 #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -15,7 +15,7 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/page_isolation.h>
 
-static int set_migratetype_isolate(struct page *page,
+static int set_migratetype_isolate(struct page *page, int migratetype,
 				bool skip_hwpoisoned_pages)
 {
 	struct zone *zone;
@@ -52,7 +52,7 @@ static int set_migratetype_isolate(struct page *page,
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found,
+	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
 				 skip_hwpoisoned_pages))
 		ret = 0;
 
@@ -64,14 +64,14 @@ static int set_migratetype_isolate(struct page *page,
 out:
 	if (!ret) {
 		unsigned long nr_pages;
-		int migratetype = get_pageblock_migratetype(page);
+		int mt = get_pageblock_migratetype(page);
 
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 		zone->nr_isolate_pageblock++;
 		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
 						NULL);
 
-		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
+		__mod_zone_freepage_state(zone, -nr_pages, mt);
 	}
 
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -183,7 +183,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (page &&
-		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
+		    set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
 			undo_pfn = pfn;
 			goto undo;
 		}
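
For context, the two isolation paths now differ only in the migratetype they hand down to start_isolate_page_range(). A rough sketch of the call sites in kernels of this era (simplified, not the verbatim source):

/* mm/page_alloc.c: alloc_contig_range() forwards its caller's
 * migratetype (MIGRATE_CMA for the CMA allocator), so CMA pageblocks
 * pass the new check in has_unmovable_pages():
 */
ret = start_isolate_page_range(pfn_max_align_down(start),
			       pfn_max_align_up(end), migratetype, false);

/* mm/memory_hotplug.c: __offline_pages() asks for MIGRATE_MOVABLE,
 * keeping its stricter "really movable" semantics:
 */
ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, true);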