Commit ee64fc93 authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds

mm: vmscan: convert lumpy_mode into a bitmask

Currently lumpy_mode is an enum and determines if lumpy reclaim is off,
synchronous or asynchronous.  In preparation for using compaction instead of
lumpy reclaim, this patch converts the flags into a bitmap.
Signed-off-by: default avatarMel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent b7aba698
...@@ -25,13 +25,13 @@ ...@@ -25,13 +25,13 @@
#define trace_reclaim_flags(page, sync) ( \ #define trace_reclaim_flags(page, sync) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
) )
#define trace_shrink_flags(file, sync) ( \ #define trace_shrink_flags(file, sync) ( \
(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \ (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ (sync & LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
) )
TRACE_EVENT(mm_vmscan_kswapd_sleep, TRACE_EVENT(mm_vmscan_kswapd_sleep,
......
...@@ -51,11 +51,20 @@ ...@@ -51,11 +51,20 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h> #include <trace/events/vmscan.h>
enum lumpy_mode { /*
LUMPY_MODE_NONE, * lumpy_mode determines how the inactive list is shrunk
LUMPY_MODE_ASYNC, * LUMPY_MODE_SINGLE: Reclaim only order-0 pages
LUMPY_MODE_SYNC, * LUMPY_MODE_ASYNC: Do not block
}; * LUMPY_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
* LUMPY_MODE_CONTIGRECLAIM: For high-order allocations, take a reference
* page from the LRU and reclaim all pages within a
* naturally aligned range
*/
typedef unsigned __bitwise__ lumpy_mode;
#define LUMPY_MODE_SINGLE ((__force lumpy_mode)0x01u)
#define LUMPY_MODE_ASYNC ((__force lumpy_mode)0x02u)
#define LUMPY_MODE_SYNC ((__force lumpy_mode)0x04u)
#define LUMPY_MODE_CONTIGRECLAIM ((__force lumpy_mode)0x08u)
struct scan_control { struct scan_control {
/* Incremented by the number of inactive pages that were scanned */ /* Incremented by the number of inactive pages that were scanned */
...@@ -88,7 +97,7 @@ struct scan_control { ...@@ -88,7 +97,7 @@ struct scan_control {
* Intend to reclaim enough continuous memory rather than reclaim * Intend to reclaim enough continuous memory rather than reclaim
* enough amount of memory. i.e, mode for high order allocation. * enough amount of memory. i.e, mode for high order allocation.
*/ */
enum lumpy_mode lumpy_reclaim_mode; lumpy_mode lumpy_reclaim_mode;
/* Which cgroup do we reclaim from */ /* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup; struct mem_cgroup *mem_cgroup;
...@@ -274,13 +283,13 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, ...@@ -274,13 +283,13 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
/*
 * Decide how the next shrink of the inactive list should behave and record
 * it in sc->lumpy_reclaim_mode.
 *
 * @priority: current reclaim priority (DEF_PRIORITY counts down towards 0)
 * @sc: scan control for this reclaim pass
 * @sync: true when the caller is prepared to wait on page writeback
 */
static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
				   bool sync)
{
	lumpy_mode syncmode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;

	/*
	 * Some reclaim has already failed.  It is not worth trying
	 * synchronous lumpy reclaim if we are still in single-page mode.
	 */
	if (sync && sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
		return;

	/*
	 * If we need a large contiguous chunk of memory, or have
	 * trouble getting a small set of contiguous pages, we
	 * will reclaim both active and inactive pages.
	 */
	sc->lumpy_reclaim_mode = LUMPY_MODE_CONTIGRECLAIM;
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->lumpy_reclaim_mode |= syncmode;
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->lumpy_reclaim_mode |= syncmode;
	else
		/* Low-order, early-priority reclaim: plain order-0 async */
		sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
}
static void disable_lumpy_reclaim_mode(struct scan_control *sc) static void disable_lumpy_reclaim_mode(struct scan_control *sc)
{ {
sc->lumpy_reclaim_mode = LUMPY_MODE_NONE; sc->lumpy_reclaim_mode = LUMPY_MODE_SINGLE | LUMPY_MODE_ASYNC;
} }
static inline int is_page_cache_freeable(struct page *page) static inline int is_page_cache_freeable(struct page *page)
...@@ -429,7 +439,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, ...@@ -429,7 +439,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* first attempt to free a range of pages fails. * first attempt to free a range of pages fails.
*/ */
if (PageWriteback(page) && if (PageWriteback(page) &&
sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC) (sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC))
wait_on_page_writeback(page); wait_on_page_writeback(page);
if (!PageWriteback(page)) { if (!PageWriteback(page)) {
...@@ -622,7 +632,7 @@ static enum page_references page_check_references(struct page *page, ...@@ -622,7 +632,7 @@ static enum page_references page_check_references(struct page *page,
referenced_page = TestClearPageReferenced(page); referenced_page = TestClearPageReferenced(page);
/* Lumpy reclaim - ignore references */ /* Lumpy reclaim - ignore references */
if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE) if (sc->lumpy_reclaim_mode & LUMPY_MODE_CONTIGRECLAIM)
return PAGEREF_RECLAIM; return PAGEREF_RECLAIM;
/* /*
...@@ -739,7 +749,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, ...@@ -739,7 +749,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* for any page for which writeback has already * for any page for which writeback has already
* started. * started.
*/ */
if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC && if ((sc->lumpy_reclaim_mode & LUMPY_MODE_SYNC) &&
may_enter_fs) may_enter_fs)
wait_on_page_writeback(page); wait_on_page_writeback(page);
else { else {
...@@ -1324,7 +1334,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken, ...@@ -1324,7 +1334,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken,
return false; return false;
/* Only stall on lumpy reclaim */ /* Only stall on lumpy reclaim */
if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE) if (sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE)
return false; return false;
/* If we have relaimed everything on the isolated list, no stall */ /* If we have relaimed everything on the isolated list, no stall */
...@@ -1375,7 +1385,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, ...@@ -1375,7 +1385,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
if (scanning_global_lru(sc)) { if (scanning_global_lru(sc)) {
nr_taken = isolate_pages_global(nr_to_scan, nr_taken = isolate_pages_global(nr_to_scan,
&page_list, &nr_scanned, sc->order, &page_list, &nr_scanned, sc->order,
sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ? sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
ISOLATE_INACTIVE : ISOLATE_BOTH, ISOLATE_INACTIVE : ISOLATE_BOTH,
zone, 0, file); zone, 0, file);
zone->pages_scanned += nr_scanned; zone->pages_scanned += nr_scanned;
...@@ -1388,7 +1398,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, ...@@ -1388,7 +1398,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
} else { } else {
nr_taken = mem_cgroup_isolate_pages(nr_to_scan, nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
&page_list, &nr_scanned, sc->order, &page_list, &nr_scanned, sc->order,
sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ? sc->lumpy_reclaim_mode & LUMPY_MODE_SINGLE ?
ISOLATE_INACTIVE : ISOLATE_BOTH, ISOLATE_INACTIVE : ISOLATE_BOTH,
zone, sc->mem_cgroup, zone, sc->mem_cgroup,
0, file); 0, file);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment