Commit 688035f7 authored by Johannes Weiner, committed by Linus Torvalds

mm: don't avoid high-priority reclaim on memcg limit reclaim

Commit 246e87a9 ("memcg: fix get_scan_count() for small targets")
sought to avoid high reclaim priorities for memcg by forcing it to scan
a minimum amount of pages when lru_pages >> priority yielded nothing.
This was done at a time when reclaim decisions like dirty throttling
were tied to the priority level.

Nowadays, the only meaningful thing still tied to priority dropping
below DEF_PRIORITY - 2 is gating whether laptop_mode=1 is generally
allowed to write.  But that is from an era where direct reclaim was
still allowed to call ->writepage, and kswapd nowadays avoids writes
until it's scanned every clean page in the system.  Potential changes to
how quick sc->may_writepage could trigger are of little concern.

Remove the force_scan stuff, as well as the ugly multi-pass target
calculation that it necessitated.

Link: http://lkml.kernel.org/r/20170228214007.5621-7-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jia He <hejianet@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a2d7f8e4
...@@ -2123,21 +2123,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, ...@@ -2123,21 +2123,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
unsigned long anon_prio, file_prio; unsigned long anon_prio, file_prio;
enum scan_balance scan_balance; enum scan_balance scan_balance;
unsigned long anon, file; unsigned long anon, file;
bool force_scan = false;
unsigned long ap, fp; unsigned long ap, fp;
enum lru_list lru; enum lru_list lru;
bool some_scanned;
int pass;
/*
* If the zone or memcg is small, nr[l] can be 0. When
* reclaiming for a memcg, a priority drop can cause high
* latencies, so it's better to scan a minimum amount. When a
* cgroup has already been deleted, scrape out the remaining
* cache forcefully to get rid of the lingering state.
*/
if (!global_reclaim(sc) || !mem_cgroup_online(memcg))
force_scan = true;
/* If we have no swap space, do not bother scanning anon pages. */ /* If we have no swap space, do not bother scanning anon pages. */
if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) { if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
...@@ -2268,9 +2255,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, ...@@ -2268,9 +2255,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
fraction[1] = fp; fraction[1] = fp;
denominator = ap + fp + 1; denominator = ap + fp + 1;
out: out:
some_scanned = false;
/* Only use force_scan on second pass. */
for (pass = 0; !some_scanned && pass < 2; pass++) {
*lru_pages = 0; *lru_pages = 0;
for_each_evictable_lru(lru) { for_each_evictable_lru(lru) {
int file = is_file_lru(lru); int file = is_file_lru(lru);
...@@ -2279,8 +2263,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, ...@@ -2279,8 +2263,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
scan = size >> sc->priority; scan = size >> sc->priority;
/*
if (!scan && pass && force_scan) * If the cgroup's already been deleted, make sure to
* scrape out the remaining cache.
*/
if (!scan && !mem_cgroup_online(memcg))
scan = min(size, SWAP_CLUSTER_MAX); scan = min(size, SWAP_CLUSTER_MAX);
switch (scan_balance) { switch (scan_balance) {
...@@ -2310,13 +2297,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg, ...@@ -2310,13 +2297,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
*lru_pages += size; *lru_pages += size;
nr[lru] = scan; nr[lru] = scan;
/*
* Skip the second pass and don't force_scan,
* if we found something to scan.
*/
some_scanned |= !!scan;
}
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment