Commit d95ea5d1 authored by Bartlomiej Zolnierkiewicz, committed by Linus Torvalds

cma: fix watermark checking

* Add ALLOC_CMA alloc flag and pass it to [__]zone_watermark_ok()
  (from Minchan Kim).

* During the watermark check, decrease the number of available free
  pages by the number of free CMA pages when necessary (unmovable
  allocations cannot use pages from CMA areas); a standalone sketch
  of this check follows below.
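
To make the intent concrete, here is a minimal userspace sketch of the
adjusted check (not kernel code; the names mirror __zone_watermark_ok()
and the NR_FREE_CMA_PAGES counter introduced by the parent commit, and
all numbers are invented):

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_CMA	0x80	/* allow allocations from CMA areas */

/*
 * Sketch of the fixed check: a caller that may not allocate from CMA
 * (ALLOC_CMA clear) must not count free pages sitting in CMA areas.
 */
static bool watermark_ok(unsigned long free_pages, unsigned long free_cma,
			 unsigned long min, unsigned long lowmem_reserve,
			 int alloc_flags)
{
	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= free_cma;
	return free_pages > min + lowmem_reserve;
}

int main(void)
{
	/* 1000 free pages, 800 of them in a CMA area, watermark of 500 */
	printf("movable:   %d\n", watermark_ok(1000, 800, 500, 0, ALLOC_CMA));
	printf("unmovable: %d\n", watermark_ok(1000, 800, 500, 0, 0));
	return 0;	/* prints 1 then 0: only the movable request fits */
}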
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d1ce749a
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -934,6 +934,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
+	int alloc_flags = 0;

 	/* Check if the GFP flags allow compaction */
 	if (!order || !may_enter_fs || !may_perform_io)
@@ -941,6 +942,10 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,

 	count_vm_event(COMPACTSTALL);

+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	/* Compact each zone in the list */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 								nodemask) {
@@ -951,7 +956,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 		rc = max(status, rc);

 		/* If a normal allocation would succeed, stop compacting */
-		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
+		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
+				      alloc_flags))
 			break;
 	}
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -358,4 +358,18 @@ extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
 extern void set_pageblock_order(void);
 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 					    struct list_head *page_list);
+
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN		WMARK_MIN
+#define ALLOC_WMARK_LOW		WMARK_LOW
+#define ALLOC_WMARK_HIGH	WMARK_HIGH
+#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
+
+#define ALLOC_HARDER		0x10 /* try to alloc harder */
+#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
+#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 #endif	/* __MM_INTERNAL_H */
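
A note on the flag layout (context for the hunk above): ALLOC_WMARK_MIN,
ALLOC_WMARK_LOW and ALLOC_WMARK_HIGH reuse the WMARK_* enum values 0..2,
so the low bits of alloc_flags double as an index into zone->watermark[]
(see get_page_from_freelist()); ALLOC_NO_WATERMARKS therefore starts at
0x04, ALLOC_WMARK_MASK is 0x03, and the new ALLOC_CMA bit at 0x80 stays
well clear of the index. A standalone illustration with invented
watermark values:

#include <stdio.h>

/* WMARK_MIN/LOW/HIGH are 0, 1 and 2 in the kernel's enum zone_watermarks */
enum { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_NO_WATERMARKS	0x04
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS - 1)
#define ALLOC_CPUSET		0x40
#define ALLOC_CMA		0x80

int main(void)
{
	unsigned long watermark[NR_WMARK] = { 32, 64, 96 }; /* invented */
	int alloc_flags = ALLOC_WMARK_LOW | ALLOC_CPUSET | ALLOC_CMA;

	/* the high flag bits, including ALLOC_CMA, do not disturb the index */
	printf("mark = %lu\n", watermark[alloc_flags & ALLOC_WMARK_MASK]);
	return 0;	/* prints "mark = 64" */
}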
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1541,19 +1541,6 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	return NULL;
 }

-/* The ALLOC_WMARK bits are used as an index to zone->watermark */
-#define ALLOC_WMARK_MIN		WMARK_MIN
-#define ALLOC_WMARK_LOW		WMARK_LOW
-#define ALLOC_WMARK_HIGH	WMARK_HIGH
-#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
-
-/* Mask to get the watermark bits */
-#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
-
-#define ALLOC_HARDER		0x10 /* try to alloc harder */
-#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
-#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-
 #ifdef CONFIG_FAIL_PAGE_ALLOC

 static struct {
@@ -1648,7 +1635,11 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
-
+#ifdef CONFIG_CMA
+	/* If allocation can't use CMA areas don't use free CMA pages */
+	if (!(alloc_flags & ALLOC_CMA))
+		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+#endif
 	if (free_pages <= min + lowmem_reserve)
 		return false;
 	for (o = 0; o < order; o++) {
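
A worked example for the hunk above: take a zone with free_pages = 1000,
of which 900 are free CMA pages, min = 150 and lowmem_reserve = 0.
Before this patch every caller compared 1000 against 150 and the
watermark passed, even though an unmovable allocation could actually
draw on only 100 pages. With the patch, a caller without ALLOC_CMA
compares 1000 - 900 = 100 against 150 and correctly reports the
watermark as not met, so reclaim or compaction can make progress.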
@@ -2362,7 +2353,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 				unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
-
+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	return alloc_flags;
 }
@@ -2587,6 +2581,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
+	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;

 	gfp_mask &= gfp_allowed_mask;

@@ -2615,9 +2610,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (!preferred_zone)
 		goto out;

+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
+			zonelist, high_zoneidx, alloc_flags,
 			preferred_zone, migratetype);
 	if (unlikely(!page))
 		page = __alloc_pages_slowpath(gfp_mask, order,
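
The same CONFIG_CMA/MIGRATE_MOVABLE test now appears at three call
sites (try_to_compact_pages(), gfp_to_alloc_flags() and the fast path
above). A follow-up could factor it into a helper along these lines
(hypothetical name, not part of this commit):

#ifdef CONFIG_CMA
static inline int gfp_to_cma_alloc_flags(gfp_t gfp_mask)
{
	/* only movable allocations may dip into CMA pageblocks */
	return allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE ?
			ALLOC_CMA : 0;
}
#else
static inline int gfp_to_cma_alloc_flags(gfp_t gfp_mask)
{
	return 0;
}
#endif

Each call site would then reduce to
"alloc_flags |= gfp_to_cma_alloc_flags(gfp_mask);".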