Commit 95b88f4d authored by Mikulas Patocka, committed by Mike Snitzer

dm writecache: pause writeback if cache full and origin being written directly

Implementation reuses dm_io_tracker, that until now was only used by
dm-cache, to track if any writes were issued directly to the origin
(due to cache being full) within the last second. If so writeback is
paused for a second.

This change improves performance for when the cache is full and IO is
issued directly to the origin device (rather than through the cache).

Depends-on: d53f1faf ("dm writecache: do direct write if the cache is full")
Suggested-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent dc4fa29f
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#include <linux/dax.h> #include <linux/dax.h>
#include <linux/pfn_t.h> #include <linux/pfn_t.h>
#include <linux/libnvdimm.h> #include <linux/libnvdimm.h>
#include <linux/delay.h>
#include "dm-io-tracker.h"
#define DM_MSG_PREFIX "writecache" #define DM_MSG_PREFIX "writecache"
...@@ -183,6 +185,8 @@ struct dm_writecache { ...@@ -183,6 +185,8 @@ struct dm_writecache {
struct work_struct writeback_work; struct work_struct writeback_work;
struct work_struct flush_work; struct work_struct flush_work;
struct dm_io_tracker iot;
struct dm_io_client *dm_io; struct dm_io_client *dm_io;
raw_spinlock_t endio_list_lock; raw_spinlock_t endio_list_lock;
...@@ -1466,6 +1470,10 @@ static int writecache_map(struct dm_target *ti, struct bio *bio) ...@@ -1466,6 +1470,10 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
} }
unlock_remap_origin: unlock_remap_origin:
if (bio_data_dir(bio) != READ) {
dm_iot_io_begin(&wc->iot, 1);
bio->bi_private = (void *)2;
}
bio_set_dev(bio, wc->dev->bdev); bio_set_dev(bio, wc->dev->bdev);
wc_unlock(wc); wc_unlock(wc);
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
...@@ -1496,11 +1504,13 @@ static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t ...@@ -1496,11 +1504,13 @@ static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t
{ {
struct dm_writecache *wc = ti->private; struct dm_writecache *wc = ti->private;
if (bio->bi_private != NULL) { if (bio->bi_private == (void *)1) {
int dir = bio_data_dir(bio); int dir = bio_data_dir(bio);
if (atomic_dec_and_test(&wc->bio_in_progress[dir])) if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir]))) if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
wake_up(&wc->bio_in_progress_wait[dir]); wake_up(&wc->bio_in_progress_wait[dir]);
} else if (bio->bi_private == (void *)2) {
dm_iot_io_end(&wc->iot, 1);
} }
return 0; return 0;
} }
...@@ -1827,6 +1837,13 @@ static void writecache_writeback(struct work_struct *work) ...@@ -1827,6 +1837,13 @@ static void writecache_writeback(struct work_struct *work)
dm_kcopyd_client_flush(wc->dm_kcopyd); dm_kcopyd_client_flush(wc->dm_kcopyd);
} }
if (!wc->writeback_all && !dm_suspended(wc->ti)) {
while (!dm_iot_idle_for(&wc->iot, HZ)) {
cond_resched();
msleep(1000);
}
}
wc_lock(wc); wc_lock(wc);
restart: restart:
if (writecache_has_error(wc)) { if (writecache_has_error(wc)) {
...@@ -2140,6 +2157,8 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -2140,6 +2157,8 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_WORK(&wc->writeback_work, writecache_writeback); INIT_WORK(&wc->writeback_work, writecache_writeback);
INIT_WORK(&wc->flush_work, writecache_flush_work); INIT_WORK(&wc->flush_work, writecache_flush_work);
dm_iot_init(&wc->iot);
raw_spin_lock_init(&wc->endio_list_lock); raw_spin_lock_init(&wc->endio_list_lock);
INIT_LIST_HEAD(&wc->endio_list); INIT_LIST_HEAD(&wc->endio_list);
wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio"); wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment