Commit 20ba27f5 authored by Andrew Morton, committed by Linus Torvalds

revert "memcg, vmscan: do not fall into reclaim-all pass too quickly"

Revert commit e975de99 ("memcg, vmscan: do not fall into reclaim-all
pass too quickly")

I merged this prematurely - Michal and Johannes still disagree about the
overall design direction and the future remains unclear.

Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 99d7a882
@@ -2176,11 +2176,10 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	}
 }
 
-static int
+static void
 __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 {
 	unsigned long nr_reclaimed, nr_scanned;
-	int groups_scanned = 0;
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2198,7 +2197,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 		while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
 			struct lruvec *lruvec;
 
-			groups_scanned++;
 			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
 			shrink_lruvec(lruvec, sc);
@@ -2226,8 +2224,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
 					 sc->nr_scanned - nr_scanned, sc));
-
-	return groups_scanned;
 }
@@ -2235,18 +2231,7 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
 	bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
 	unsigned long nr_scanned = sc->nr_scanned;
-	int scanned_groups;
 
-	scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-	/*
-	 * memcg iterator might race with other reclaimer or start from
-	 * a incomplete tree walk so the tree walk in __shrink_zone
-	 * might have missed groups that are above the soft limit. Try
-	 * another loop to catch up with others. Do it just once to
-	 * prevent from reclaim latencies when other reclaimers always
-	 * preempt this one.
-	 */
-	if (do_soft_reclaim && !scanned_groups)
-		__shrink_zone(zone, sc, do_soft_reclaim);
+	__shrink_zone(zone, sc, do_soft_reclaim);
 
 	/*
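For readers following the discussion, below is a minimal, self-contained C sketch of the "count scanned groups, then retry once" behaviour that the removed lines implemented in mm/vmscan.c. The function names mirror the kernel code in the diff above, but the types, parameters, and bodies are illustrative stand-ins, not the real implementation.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures (not the real ones). */
struct scan_control { int priority; };
struct zone { int nr_groups_over_soft_limit; };

/*
 * Stand-in for __shrink_zone(): the reverted patch made it return how many
 * memory cgroups the walk visited, so the caller could tell an empty
 * soft-reclaim pass apart from a productive one.
 */
static int __shrink_zone(struct zone *zone, struct scan_control *sc,
			 bool soft_reclaim)
{
	int groups_scanned = 0;

	if (soft_reclaim)
		groups_scanned = zone->nr_groups_over_soft_limit;
	else
		groups_scanned = 1;	/* a reclaim-all pass always scans */

	return groups_scanned;
}

static void shrink_zone(struct zone *zone, struct scan_control *sc,
			bool do_soft_reclaim)
{
	int scanned_groups;

	scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
	/*
	 * The reverted patch retried exactly once when a soft-reclaim pass
	 * saw no groups at all (e.g. a racing reclaimer left the iterator
	 * mid-walk), instead of escalating straight to a reclaim-all pass.
	 */
	if (do_soft_reclaim && !scanned_groups)
		__shrink_zone(zone, sc, do_soft_reclaim);
}

int main(void)
{
	struct scan_control sc = { .priority = 12 };
	struct zone z = { .nr_groups_over_soft_limit = 0 };

	shrink_zone(&z, &sc, true);	/* exercises the one-shot retry */
	puts("done");
	return 0;
}

After this revert, shrink_zone() simply calls __shrink_zone() once, as the single '+' line in the last hunk shows; whether such an extra pass is wanted at all is part of the design disagreement referenced in the commit message.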