Commit 70704c33 authored by Mikulas Patocka, committed by Mike Snitzer

dm bufio: do buffer cleanup from a workqueue

Until now, DM bufio's waiting for IO from reclaim context in its
shrinker has caused kswapd to block, which results in systemic IO
stalls and even deadlock, e.g.:
https://www.redhat.com/archives/dm-devel/2020-March/msg00025.html

Here is Dave Chinner's problem description that motivated this fix,
from: https://lore.kernel.org/linux-fsdevel/20190809215733.GZ7777@dread.disaster.area/

"Waiting for IO in kswapd reclaim context is considered harmful -
kswapd context shrinker reclaim should be as non-blocking as possible,
and any back-off to wait for IO to complete should be done by the high
level reclaim core once it's completed an entire reclaim scan cycle of
everything....

What follows from that, and is pertinent in this situation, is that if
you don't block kswapd, then other reclaim contexts are not going to
get stuck waiting for it regardless of the reclaim context they use."

Continued elsewhere:

"The only way to fix this problem once and for all is to stop using
the shrinker as a mechanism to issue and wait on IO. If you need
background writeback of dirty buffers, do it from a WQ_MEM_RECLAIM
workqueue that isn't directly in the memory reclaim path and so can
issue writeback and block safely from a GFP_KERNEL context. Kick the
workqueue from the shrinker context, but get rid of the IO submission
and waiting from the shrinker and all the GFP_NOFS memory reclaim
recursion problems go away."

As such, this commit moves buffer cleanup to a workqueue.
Suggested-by: Dave Chinner <dchinner@redhat.com>
Reported-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Tested-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent e766668c
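
Editorial note: before reading the diff, here is a minimal, self-contained
sketch of the pattern Chinner describes — the shrinker only records how much
to reclaim and kicks a workqueue, and the work item does the blocking cleanup
from process context. All names here (my_cache, my_wq, evict_one_object) are
illustrative, not the commit's code; the real implementation follows below.

/* Illustrative sketch only -- hypothetical names, not the commit's code. */
#include <linux/shrinker.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

struct my_cache {
	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;	/* objects the VM has asked us to drop */
};

/* Allocated elsewhere with alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0). */
static struct workqueue_struct *my_wq;

/* Hypothetical helper: evicts one object, may issue and wait on IO. */
static bool evict_one_object(struct my_cache *c);

static void my_shrink_work(struct work_struct *w)
{
	struct my_cache *c = container_of(w, struct my_cache, shrink_work);

	/* Process context: safe to take locks, issue IO and sleep. */
	while (atomic_long_read(&c->need_shrink)) {
		if (!evict_one_object(c))
			break;
		atomic_long_dec(&c->need_shrink);
	}
}

static unsigned long my_shrink_scan(struct shrinker *s, struct shrink_control *sc)
{
	struct my_cache *c = container_of(s, struct my_cache, shrinker);

	/* Reclaim context: never block here; record the request, kick the wq. */
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(my_wq, &c->shrink_work);
	return sc->nr_to_scan;	/* report the work as handed off */
}
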
@@ -108,7 +108,10 @@ struct dm_bufio_client {
 	int async_write_error;
 
 	struct list_head client_list;
+
 	struct shrinker shrinker;
+	struct work_struct shrink_work;
+	atomic_long_t need_shrink;
 };
 
 /*
@@ -1634,8 +1637,7 @@ static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 	return retain_bytes;
 }
 
-static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
-			    gfp_t gfp_mask)
+static void __scan(struct dm_bufio_client *c)
 {
 	int l;
 	struct dm_buffer *b, *tmp;
@@ -1646,42 +1648,58 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 
 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
-			if (__try_evict_buffer(b, gfp_mask))
+			if (count - freed <= retain_target)
+				atomic_long_set(&c->need_shrink, 0);
+			if (!atomic_long_read(&c->need_shrink))
+				return;
+			if (__try_evict_buffer(b, GFP_KERNEL)) {
+				atomic_long_dec(&c->need_shrink);
 				freed++;
-			if (!--nr_to_scan || ((count - freed) <= retain_target))
-				return freed;
+			}
 			cond_resched();
 		}
 	}
-	return freed;
 }
 
-static unsigned long
-dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+static void shrink_work(struct work_struct *w)
+{
+	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
+
+	dm_bufio_lock(c);
+	__scan(c);
+	dm_bufio_unlock(c);
+}
+
+static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct dm_bufio_client *c;
-	unsigned long freed;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (sc->gfp_mask & __GFP_FS)
-		dm_bufio_lock(c);
-	else if (!dm_bufio_trylock(c))
-		return SHRINK_STOP;
+	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
+	queue_work(dm_bufio_wq, &c->shrink_work);
 
-	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
-	dm_bufio_unlock(c);
-	return freed;
+	return sc->nr_to_scan;
 }
 
-static unsigned long
-dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
 	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
 			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
 	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
 
-	return (count < retain_target) ? 0 : (count - retain_target);
+	if (unlikely(count < retain_target))
+		count = 0;
+	else
+		count -= retain_target;
+
+	if (unlikely(count < queued_for_cleanup))
+		count = 0;
+	else
+		count -= queued_for_cleanup;
+
+	return count;
 }
 
 /*
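
Editorial note on the new dm_bufio_shrink_count() accounting: because the
scan side now only queues work, buffers whose cleanup has already been
requested would otherwise be reported to the shrinker core again on the next
->count_objects() call. Subtracting need_shrink, clamped at zero just like
the retain_target subtraction, avoids that double-counting. With hypothetical
numbers: count = 10000 buffers, retain_target = 4096 and need_shrink = 1024
yield a report of 10000 - 4096 - 1024 = 4880 reclaimable objects; and if
need_shrink ever exceeds what remains, the report clamps to 0 instead of
wrapping around as a huge unsigned value.
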
@@ -1772,6 +1790,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 		__free_buffer_wake(b);
 	}
 
+	INIT_WORK(&c->shrink_work, shrink_work);
+	atomic_long_set(&c->need_shrink, 0);
+
 	c->shrinker.count_objects = dm_bufio_shrink_count;
 	c->shrinker.scan_objects = dm_bufio_shrink_scan;
 	c->shrinker.seeks = 1;
@@ -1817,6 +1838,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
 	drop_buffers(c);
 
 	unregister_shrinker(&c->shrinker);
+	flush_work(&c->shrink_work);
 
 	mutex_lock(&dm_bufio_clients_lock);
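
Editorial note on the teardown ordering above: unregister_shrinker() comes
first, so dm_bufio_shrink_scan() can no longer queue new work against this
client; the flush_work() that follows then drains any shrink_work already in
flight before the client's buffers and lock are freed. Flushing before
unregistering would leave a window in which the shrinker could re-queue work
against a dying client.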