Commit 47118af0 authored by Michal Nazarewicz, committed by Marek Szyprowski

mm: mmzone: MIGRATE_CMA migration type added

The MIGRATE_CMA migration type has two main characteristics:
(i) only movable pages can be allocated from MIGRATE_CMA
pageblocks and (ii) the page allocator will never change the
migration type of MIGRATE_CMA pageblocks.
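To illustrate property (ii): the patch guards every site that would re-type a
pageblock with the new is_migrate_cma() helper.  The sketch below is not a hunk
of this patch; it only shows the recurring pattern, using the existing
get_pageblock_migratetype()/set_pageblock_migratetype() helpers and a
hypothetical maybe_claim_pageblock() wrapper:

/*
 * Illustration only (hypothetical helper): when stealing a block for
 * another allocation type, MIGRATE_CMA (and isolated) pageblocks are
 * skipped, so their migration type is never changed.
 */
static void maybe_claim_pageblock(struct page *page, int start_migratetype)
{
	int mt = get_pageblock_migratetype(page);

	if (is_migrate_cma(mt) || mt == MIGRATE_ISOLATE)
		return;

	set_pageblock_migratetype(page, start_migratetype);
}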

This guarantees (to some degree) that a page in a MIGRATE_CMA
pageblock can always be migrated somewhere else (unless there is
no memory left in the system).

It is designed to be used for allocating big chunks (e.g. 10 MiB)
of physically contiguous memory.  Once a driver requests
contiguous memory, pages from MIGRATE_CMA pageblocks may be
migrated away to create a contiguous block.
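A rough, hypothetical driver-side sketch of that flow follows (the
grab_contig_buffer()/release_contig_buffer() names and the pfn parameters are
made up for illustration; only alloc_contig_range() and free_contig_range(),
declared in the include/linux/gfp.h hunk below, are real interfaces):

/*
 * Hypothetical example: carve a physically contiguous buffer out of a
 * CMA-reserved PFN range [base_pfn, base_pfn + nr_pages).  Movable pages
 * currently occupying the range are migrated away by alloc_contig_range().
 */
static struct page *grab_contig_buffer(unsigned long base_pfn,
					unsigned nr_pages)
{
	if (alloc_contig_range(base_pfn, base_pfn + nr_pages))
		return NULL;	/* migration or allocation failed */
	return pfn_to_page(base_pfn);
}

static void release_contig_buffer(unsigned long base_pfn, unsigned nr_pages)
{
	free_contig_range(base_pfn, nr_pages);	/* give the pages back */
}

Because the range was reserved as MIGRATE_CMA, only movable pages can occupy
it, which is what makes the migration step feasible.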

To minimise the number of migrations, the MIGRATE_CMA migration
type is the last type tried when the page allocator falls back
from the requested migration type.
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
parent 6d4a4916

include/linux/gfp.h
@@ -397,6 +397,9 @@ static inline bool pm_suspended_storage(void)
 extern int alloc_contig_range(unsigned long start, unsigned long end);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
 
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
 #endif
 
 #endif /* __LINUX_GFP_H */

include/linux/mmzone.h
@@ -35,13 +35,37 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+	MIGRATE_UNMOVABLE,
+	MIGRATE_RECLAIMABLE,
+	MIGRATE_MOVABLE,
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+	/*
+	 * MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works.  Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and page allocator never
+	 * implicitly change migration type of MIGRATE_CMA pageblock.
+	 *
+	 * The way to use it is to change migratetype of a range of
+	 * pageblocks to MIGRATE_CMA which can be done by
+	 * __free_pageblock_cma() function.  What is important though
+	 * is that a range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should biggest page be bigger then
+	 * a single pageblock.
+	 */
+	MIGRATE_CMA,
+#endif
+	MIGRATE_ISOLATE,	/* can't allocate from here */
+	MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#else
+# define is_migrate_cma(migratetype) false
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \

mm/Kconfig
@@ -198,7 +198,7 @@ config COMPACTION
 config MIGRATION
 	bool "Page migration"
 	def_bool y
-	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
 	help
 	  Allows the migration of the physical location of pages of processes
 	  while the virtual addresses are not changed. This is useful in

mm/compaction.c
@@ -45,6 +45,11 @@ static void map_pages(struct list_head *list)
 	}
 }
 
+static inline bool migrate_async_suitable(int migratetype)
+{
+	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
 /*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -299,7 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		 */
 		pageblock_nr = low_pfn >> pageblock_order;
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-		    get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			low_pfn += pageblock_nr_pages;
 			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
 			last_pageblock_nr = pageblock_nr;
@@ -367,8 +372,8 @@ static bool suitable_migration_target(struct page *page)
 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
 		return true;
 
-	/* If the block is MIGRATE_MOVABLE, allow migration */
-	if (migratetype == MIGRATE_MOVABLE)
+	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+	if (migrate_async_suitable(migratetype))
 		return true;
 
 	/* Otherwise skip the block */

mm/page_alloc.c
@@ -750,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 	__free_pages(page, order);
 }
 
+#ifdef CONFIG_CMA
+/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_page_refcounted(page);
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	__free_pages(page, pageblock_order);
+	totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
@@ -875,10 +893,15 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][3] = {
+static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+#endif
 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
@@ -995,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			 * pages to the preferred allocation list. If falling
 			 * back for a reclaimable kernel allocation, be more
 			 * aggressive about taking ownership of free pages
+			 *
+			 * On the other hand, never change migration
+			 * type of MIGRATE_CMA pageblocks nor move CMA
+			 * pages on different free lists. We don't
+			 * want unmovable pages to be allocated from
+			 * MIGRATE_CMA areas.
 			 */
-			if (unlikely(current_order >= (pageblock_order >> 1)) ||
+			if (!is_migrate_cma(migratetype) &&
+			    (unlikely(current_order >= pageblock_order / 2) ||
 					start_migratetype == MIGRATE_RECLAIMABLE ||
-					page_group_by_mobility_disabled) {
-				unsigned long pages;
+					page_group_by_mobility_disabled)) {
+				int pages;
 				pages = move_freepages_block(zone, page,
 							start_migratetype);
@@ -1017,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			rmv_page_order(page);
 
 			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order)
+			if (current_order >= pageblock_order &&
+			    !is_migrate_cma(migratetype))
 				change_pageblock_range(page, current_order,
 							start_migratetype);
 
-			expand(zone, page, order, current_order, area, migratetype);
+			expand(zone, page, order, current_order, area,
+			       is_migrate_cma(migratetype)
+			     ? migratetype : start_migratetype);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
 				start_migratetype, migratetype);
@@ -1072,7 +1105,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
-	int i;
+	int mt = migratetype, i;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
@@ -1093,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
-		set_page_private(page, migratetype);
+		if (IS_ENABLED(CONFIG_CMA)) {
+			mt = get_pageblock_migratetype(page);
+			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+				mt = migratetype;
+		}
+		set_page_private(page, mt);
 		list = &page->lru;
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1373,8 +1411,12 @@ int split_free_page(struct page *page)
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
-		for (; page < endpage; page += pageblock_nr_pages)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		for (; page < endpage; page += pageblock_nr_pages) {
+			int mt = get_pageblock_migratetype(page);
+			if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+				set_pageblock_migratetype(page,
+							  MIGRATE_MOVABLE);
+		}
 	}
 
 	return 1 << order;
@@ -5414,14 +5456,16 @@ static int
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
 	unsigned long pfn, iter, found;
+	int mt;
+
 	/*
 	 * For avoiding noise data, lru_add_drain_all() should be called
 	 * If ZONE_MOVABLE, the zone never contains immobile pages
 	 */
 	if (zone_idx(zone) == ZONE_MOVABLE)
 		return true;
-
-	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+	mt = get_pageblock_migratetype(page);
+	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
 		return true;
 
 	pfn = page_to_pfn(page);

mm/vmstat.c
@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
 	"Reclaimable",
 	"Movable",
 	"Reserve",
+#ifdef CONFIG_CMA
+	"CMA",
+#endif
 	"Isolate",
 };