Commit 53e9a615 authored by Martin Hicks's avatar Martin Hicks Committed by Linus Torvalds

[PATCH] VM: zone reclaim atomic ops cleanup

Christoph Lameter and Marcelo Tosatti asked to get rid of the
atomic_inc_and_test() to cleanup the atomic ops in the zone reclaim code.
Signed-off-by: Martin Hicks <mort@sgi.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bce5f6ba
...@@ -1909,7 +1909,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat, ...@@ -1909,7 +1909,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone->nr_scan_inactive = 0; zone->nr_scan_inactive = 0;
zone->nr_active = 0; zone->nr_active = 0;
zone->nr_inactive = 0; zone->nr_inactive = 0;
atomic_set(&zone->reclaim_in_progress, -1); atomic_set(&zone->reclaim_in_progress, 0);
if (!size) if (!size)
continue; continue;
......
...@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc) ...@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
unsigned long nr_active; unsigned long nr_active;
unsigned long nr_inactive; unsigned long nr_inactive;
atomic_inc(&zone->reclaim_in_progress);
/* /*
* Add one to `nr_to_scan' just to make sure that the kernel will * Add one to `nr_to_scan' just to make sure that the kernel will
* slowly sift through the active list. * slowly sift through the active list.
...@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc) ...@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
} }
throttle_vm_writeout(); throttle_vm_writeout();
atomic_dec(&zone->reclaim_in_progress);
} }
/* /*
...@@ -900,9 +904,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc) ...@@ -900,9 +904,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */ continue; /* Let kswapd poll it */
atomic_inc(&zone->reclaim_in_progress);
shrink_zone(zone, sc); shrink_zone(zone, sc);
atomic_dec(&zone->reclaim_in_progress);
} }
} }
...@@ -1358,14 +1360,13 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order) ...@@ -1358,14 +1360,13 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
sc.swap_cluster_max = SWAP_CLUSTER_MAX; sc.swap_cluster_max = SWAP_CLUSTER_MAX;
/* Don't reclaim the zone if there are other reclaimers active */ /* Don't reclaim the zone if there are other reclaimers active */
if (!atomic_inc_and_test(&zone->reclaim_in_progress)) if (atomic_read(&zone->reclaim_in_progress) > 0)
goto out; goto out;
shrink_zone(zone, &sc); shrink_zone(zone, &sc);
total_reclaimed = sc.nr_reclaimed; total_reclaimed = sc.nr_reclaimed;
out: out:
atomic_dec(&zone->reclaim_in_progress);
return total_reclaimed; return total_reclaimed;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment