Commit 6cfe57a9 authored by Sebastian Andrzej Siewior, committed by Linus Torvalds

mm/list_lru.c: move locking from __list_lru_walk_one() to its caller

Move the locking inside __list_lru_walk_one() to its caller.  This is a
preparation step in order to introduce list_lru_walk_one_irq() which
does spin_lock_irq() instead of spin_lock() for the locking.

Link: http://lkml.kernel.org/r/20180716111921.5365-3-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 87a5ffc1
...@@ -219,7 +219,6 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, ...@@ -219,7 +219,6 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
struct list_head *item, *n; struct list_head *item, *n;
unsigned long isolated = 0; unsigned long isolated = 0;
spin_lock(&nlru->lock);
l = list_lru_from_memcg_idx(nlru, memcg_idx); l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart: restart:
list_for_each_safe(item, n, &l->list) { list_for_each_safe(item, n, &l->list) {
...@@ -265,8 +264,6 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, ...@@ -265,8 +264,6 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
BUG(); BUG();
} }
} }
spin_unlock(&nlru->lock);
return isolated; return isolated;
} }
...@@ -275,8 +272,14 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, ...@@ -275,8 +272,14 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg, list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk) unsigned long *nr_to_walk)
{ {
return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), struct list_lru_node *nlru = &lru->node[nid];
isolate, cb_arg, nr_to_walk); unsigned long ret;
spin_lock(&nlru->lock);
ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
isolate, cb_arg, nr_to_walk);
spin_unlock(&nlru->lock);
return ret;
} }
EXPORT_SYMBOL_GPL(list_lru_walk_one); EXPORT_SYMBOL_GPL(list_lru_walk_one);
...@@ -291,8 +294,13 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid, ...@@ -291,8 +294,13 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
nr_to_walk); nr_to_walk);
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
for_each_memcg_cache_index(memcg_idx) { for_each_memcg_cache_index(memcg_idx) {
struct list_lru_node *nlru = &lru->node[nid];
spin_lock(&nlru->lock);
isolated += __list_lru_walk_one(lru, nid, memcg_idx, isolated += __list_lru_walk_one(lru, nid, memcg_idx,
isolate, cb_arg, nr_to_walk); isolate, cb_arg, nr_to_walk);
spin_unlock(&nlru->lock);
if (*nr_to_walk <= 0) if (*nr_to_walk <= 0)
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment