Commit d7428c50 authored by Dmitry Fomichev, committed by Mike Snitzer

dm zoned: improve error handling in i/o map code

Some errors are ignored in the I/O path while queueing chunks
for processing by chunk works. Since these errors are transient
in nature, it should be possible to retry the failed incoming
commands.

The fix:

Errors that can occur while queueing chunks are now propagated up
to the main mapping function, which returns DM_MAPIO_REQUEUE for
any incoming request that cannot be properly queued.

Error logging/debug messages are added where needed.

Fixes: 3b1a94c8 ("dm zoned: drive-managed zoned block device target")
Cc: stable@vger.kernel.org
Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent b234c6d7
drivers/md/dm-zoned-target.c

@@ -513,22 +513,24 @@ static void dmz_flush_work(struct work_struct *work)
  * Get a chunk work and start it to process a new BIO.
  * If the BIO chunk has no work yet, create one.
  */
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
 	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
 	struct dm_chunk_work *cw;
+	int ret = 0;
 
 	mutex_lock(&dmz->chunk_lock);
 
 	/* Get the BIO chunk work. If one is not active yet, create one */
 	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
 	if (!cw) {
-		int ret;
 
 		/* Create a new chunk work */
 		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
-		if (!cw)
+		if (unlikely(!cw)) {
+			ret = -ENOMEM;
 			goto out;
+		}
 
 		INIT_WORK(&cw->work, dmz_chunk_work);
 		refcount_set(&cw->refcount, 0);
@@ -539,7 +541,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
 		if (unlikely(ret)) {
 			kfree(cw);
-			cw = NULL;
 			goto out;
 		}
 	}
@@ -547,10 +548,12 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 	bio_list_add(&cw->bio_list, bio);
 	dmz_get_chunk_work(cw);
 
+	dmz_reclaim_bio_acc(dmz->reclaim);
 	if (queue_work(dmz->chunk_wq, &cw->work))
 		dmz_get_chunk_work(cw);
 out:
 	mutex_unlock(&dmz->chunk_lock);
+	return ret;
 }
 
 /*
@@ -564,6 +567,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 	sector_t sector = bio->bi_iter.bi_sector;
 	unsigned int nr_sectors = bio_sectors(bio);
 	sector_t chunk_sector;
+	int ret;
 
 	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
 		      bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +605,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
 		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
 
 	/* Now ready to handle this BIO */
-	dmz_reclaim_bio_acc(dmz->reclaim);
-	dmz_queue_chunk_work(dmz, bio);
+	ret = dmz_queue_chunk_work(dmz, bio);
+	if (ret) {
+		dmz_dev_debug(dmz->dev,
+			      "BIO op %d, can't process chunk %llu, err %i\n",
+			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+			      ret);
+		return DM_MAPIO_REQUEUE;
+	}
 
 	return DM_MAPIO_SUBMITTED;
 }
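For readers who want to experiment with the pattern outside the kernel, below is a minimal, self-contained user-space C sketch of the same idea: the queueing helper reports a transient failure (a simulated -ENOMEM) instead of swallowing it, and the map-style caller translates that failure into a requeue status. Everything in the sketch is invented for illustration (mock_queue_chunk_work, mock_map, the enum values); only the names DM_MAPIO_SUBMITTED and DM_MAPIO_REQUEUE echo the real device-mapper return codes, whose actual definitions live in the kernel headers, not here.

/*
 * Minimal user-space sketch of the error-propagation pattern used by this
 * patch. All names except DM_MAPIO_SUBMITTED/DM_MAPIO_REQUEUE are invented
 * for the demo; the enum values are placeholders, not the kernel's.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

enum mock_mapio {
	DM_MAPIO_SUBMITTED,	/* request queued for asynchronous processing */
	DM_MAPIO_REQUEUE,	/* transient failure, ask the caller to retry */
};

struct mock_chunk_work {
	unsigned int chunk;
};

/*
 * Stand-in for the queueing helper: return 0 on success or a negative
 * errno so the caller can decide what to do, instead of dropping the error.
 */
static int mock_queue_chunk_work(unsigned int chunk, int simulate_oom)
{
	struct mock_chunk_work *cw = simulate_oom ? NULL : malloc(sizeof(*cw));

	if (!cw)
		return -ENOMEM;	/* propagate the transient failure */

	cw->chunk = chunk;
	printf("chunk %u queued for processing\n", cw->chunk);
	free(cw);
	return 0;
}

/*
 * Stand-in for the map function: translate a queueing error into a
 * "requeue" status so the upper layer retries the command later.
 */
static enum mock_mapio mock_map(unsigned int chunk, int simulate_oom)
{
	int ret = mock_queue_chunk_work(chunk, simulate_oom);

	if (ret) {
		fprintf(stderr, "can't process chunk %u, err %d\n", chunk, ret);
		return DM_MAPIO_REQUEUE;
	}
	return DM_MAPIO_SUBMITTED;
}

int main(void)
{
	printf("normal path -> %d\n", mock_map(7, 0));	/* DM_MAPIO_SUBMITTED */
	printf("OOM path    -> %d\n", mock_map(7, 1));	/* DM_MAPIO_REQUEUE */
	return 0;
}

Compiling and running this (e.g. cc demo.c && ./a.out) shows the success path returning the submitted status and the simulated allocation failure returning the requeue status, which mirrors the decision the patched dmz_map() now makes.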