Commit 15748048 authored by KOSAKI Motohiro, committed by Linus Torvalds

vmscan: avoid subtraction of unsigned types

'slab_reclaimable' and 'nr_pages' are unsigned.  Subtracting them is unsafe
because a negative result wraps around to a huge positive value and is then
misinterpreted by the comparison and the reclaim accounting.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7ee92255
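The failure mode is easy to reproduce outside the kernel. Below is a minimal userspace sketch (the counts are made-up illustration values, not taken from the patch) of why the old shrink-slab loop condition misbehaves once nr_pages exceeds the slab_reclaimable snapshot, and how moving nr_pages to the other side of the comparison, as this patch does, avoids the unsigned subtraction entirely.

/*
 * Standalone sketch (not kernel code) of the failure mode this patch
 * fixes.  The constants are made up for illustration only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long slab_reclaimable = 16;	/* snapshot before shrinking */
	unsigned long nr_pages = 32;		/* reclaim target */
	unsigned long current_slab = 10;	/* pages still reclaimable now */

	/*
	 * Old condition: when nr_pages > slab_reclaimable the subtraction
	 * wraps around to a value near ULONG_MAX, so the comparison is
	 * almost always false and the shrink loop stops too early.
	 */
	if (current_slab > slab_reclaimable - nr_pages)
		printf("old condition: keep shrinking\n");
	else
		printf("old condition: stop (rhs wrapped to %lu)\n",
		       slab_reclaimable - nr_pages);

	/*
	 * New condition: move nr_pages to the other side so no subtraction
	 * (and therefore no wraparound) can occur.
	 */
	if (current_slab + nr_pages > slab_reclaimable)
		printf("new condition: keep shrinking\n");
	else
		printf("new condition: stop\n");

	return 0;
}

With the values above, the old form exits even though reclaimable slab pages remain, while the rearranged form keeps shrinking; both forms agree whenever slab_reclaimable >= nr_pages.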
@@ -2600,7 +2600,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 	};
-	unsigned long slab_reclaimable;
+	unsigned long nr_slab_pages0, nr_slab_pages1;
 
 	cond_resched();
 	/*
@@ -2625,8 +2625,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
 	}
 
-	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-	if (slab_reclaimable > zone->min_slab_pages) {
+	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
 		 * many pages were freed in this zone. So we take the current
@@ -2638,16 +2638,17 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * take a long time.
 		 */
 		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-				slab_reclaimable - nr_pages)
+		       (zone_page_state(zone, NR_SLAB_RECLAIMABLE) + nr_pages >
+			nr_slab_pages0))
 			;
 
 		/*
 		 * Update nr_reclaimed by the number of slab pages we
 		 * reclaimed from this zone.
 		 */
-		sc.nr_reclaimed += slab_reclaimable -
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		if (nr_slab_pages1 < nr_slab_pages0)
+			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
 	}
 
 	p->reclaim_state = NULL;
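The last hunk guards the same kind of subtraction in the reclaim accounting. Below is a minimal userspace sketch (again with made-up values) of why the old slab_reclaimable - zone_page_state(...) update could wildly inflate sc.nr_reclaimed when slab usage grew while shrinking, and how re-sampling the counter and comparing first makes the unsigned subtraction safe.

/*
 * Standalone sketch (not kernel code) of the accounting hunk: only
 * credit nr_reclaimed when the slab counter actually went down.
 * Values are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_slab_pages0 = 100;	/* snapshot before shrink_slab() */
	unsigned long nr_slab_pages1 = 120;	/* slab grew concurrently */
	unsigned long nr_reclaimed = 0;

	/* Old code: the subtraction wraps and over-reports reclaim progress. */
	printf("old accounting would add %lu\n",
	       nr_slab_pages0 - nr_slab_pages1);

	/* New code: the guard ensures the subtraction cannot go negative. */
	if (nr_slab_pages1 < nr_slab_pages0)
		nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;

	printf("new accounting adds %lu\n", nr_reclaimed);
	return 0;
}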