Commit 83aeeada authored by Konstantin Khlebnikov, committed by Linus Torvalds

vmscan: use atomic-long for shrinker batching

Use atomic-long operations instead of looping around cmpxchg().

[akpm@linux-foundation.org: massage atomic.h inclusions]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 635697c6
...@@ -393,8 +393,8 @@ struct inodes_stat_t { ...@@ -393,8 +393,8 @@ struct inodes_stat_t {
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <linux/fiemap.h> #include <linux/fiemap.h>
#include <linux/rculist_bl.h> #include <linux/rculist_bl.h>
#include <linux/shrinker.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/shrinker.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include <linux/prio_tree.h> #include <linux/prio_tree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h> #include <linux/debug_locks.h>
#include <linux/mm_types.h> #include <linux/mm_types.h>
#include <linux/range.h> #include <linux/range.h>
......
...@@ -35,7 +35,7 @@ struct shrinker { ...@@ -35,7 +35,7 @@ struct shrinker {
/* These are for internal use */ /* These are for internal use */
struct list_head list; struct list_head list;
long nr; /* objs pending delete */ atomic_long_t nr_in_batch; /* objs pending delete */
}; };
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *); extern void register_shrinker(struct shrinker *);
......
...@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone, ...@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
*/ */
void register_shrinker(struct shrinker *shrinker) void register_shrinker(struct shrinker *shrinker)
{ {
shrinker->nr = 0; atomic_long_set(&shrinker->nr_in_batch, 0);
down_write(&shrinker_rwsem); down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list); list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem); up_write(&shrinker_rwsem);
...@@ -264,9 +264,7 @@ unsigned long shrink_slab(struct shrink_control *shrink, ...@@ -264,9 +264,7 @@ unsigned long shrink_slab(struct shrink_control *shrink,
* and zero it so that other concurrent shrinker invocations * and zero it so that other concurrent shrinker invocations
* don't also do this scanning work. * don't also do this scanning work.
*/ */
do { nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
nr = shrinker->nr;
} while (cmpxchg(&shrinker->nr, nr, 0) != nr);
total_scan = nr; total_scan = nr;
delta = (4 * nr_pages_scanned) / shrinker->seeks; delta = (4 * nr_pages_scanned) / shrinker->seeks;
...@@ -328,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink, ...@@ -328,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
* manner that handles concurrent updates. If we exhausted the * manner that handles concurrent updates. If we exhausted the
* scan, there is no need to do an update. * scan, there is no need to do an update.
*/ */
do { if (total_scan > 0)
nr = shrinker->nr; new_nr = atomic_long_add_return(total_scan,
new_nr = total_scan + nr; &shrinker->nr_in_batch);
if (total_scan <= 0) else
break; new_nr = atomic_long_read(&shrinker->nr_in_batch);
} while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment