Commit 5e8eed27 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-5.8/dm-fixes' of...

Merge tag 'for-5.8/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Quite a few DM zoned target fixes and a Zone append fix in DM core.

   Considering the amount of dm-zoned changes that went in during the
   5.8 merge window these fixes are not that surprising.

 - A few DM writecache target fixes.

 - A fix to Documentation index to include DM ebs target docs.

 - Small cleanup to use struct_size() in DM core's retrieve_deps().

* tag 'for-5.8/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm writecache: add cond_resched to loop in persistent_memory_claim()
  dm zoned: Fix reclaim zone selection
  dm zoned: Fix random zone reclaim selection
  dm: update original bio sector on Zone Append
  dm zoned: Fix metadata zone size check
  docs: device-mapper: add dm-ebs.rst to an index file
  dm ioctl: use struct_size() helper in retrieve_deps()
  dm writecache: skip writecache_wait when using pmem mode
  dm writecache: correct uncommitted_block when discarding uncommitted entry
  dm zoned: assign max_io_len correctly
  dm zoned: fix uninitialized pointer dereference
parents 6116dea8 d35bd764
...@@ -11,6 +11,7 @@ Device Mapper ...@@ -11,6 +11,7 @@ Device Mapper
dm-clone dm-clone
dm-crypt dm-crypt
dm-dust dm-dust
dm-ebs
dm-flakey dm-flakey
dm-init dm-init
dm-integrity dm-integrity
......
...@@ -1471,7 +1471,7 @@ static void retrieve_deps(struct dm_table *table, ...@@ -1471,7 +1471,7 @@ static void retrieve_deps(struct dm_table *table,
/* /*
* Check we have enough space. * Check we have enough space.
*/ */
needed = sizeof(*deps) + (sizeof(*deps->dev) * count); needed = struct_size(deps, dev, count);
if (len < needed) { if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG; param->flags |= DM_BUFFER_FULL_FLAG;
return; return;
......
...@@ -282,6 +282,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) ...@@ -282,6 +282,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
while (daa-- && i < p) { while (daa-- && i < p) {
pages[i++] = pfn_t_to_page(pfn); pages[i++] = pfn_t_to_page(pfn);
pfn.val++; pfn.val++;
if (!(i & 15))
cond_resched();
} }
} while (i < p); } while (i < p);
wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
...@@ -849,10 +851,14 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ ...@@ -849,10 +851,14 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
if (likely(!e->write_in_progress)) { if (likely(!e->write_in_progress)) {
if (!discarded_something) { if (!discarded_something) {
if (!WC_MODE_PMEM(wc)) {
writecache_wait_for_ios(wc, READ); writecache_wait_for_ios(wc, READ);
writecache_wait_for_ios(wc, WRITE); writecache_wait_for_ios(wc, WRITE);
}
discarded_something = true; discarded_something = true;
} }
if (!writecache_entry_is_committed(wc, e))
wc->uncommitted_blocks--;
writecache_free_entry(wc, e); writecache_free_entry(wc, e);
} }
......
...@@ -1078,7 +1078,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb, ...@@ -1078,7 +1078,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1) nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
>> zmd->zone_nr_blocks_shift; >> zmd->zone_nr_blocks_shift;
if (!nr_meta_zones || if (!nr_meta_zones ||
nr_meta_zones >= zmd->nr_rnd_zones) { (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
(zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
dmz_dev_err(dev, "Invalid number of metadata blocks"); dmz_dev_err(dev, "Invalid number of metadata blocks");
return -ENXIO; return -ENXIO;
} }
...@@ -1949,7 +1950,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd, ...@@ -1949,7 +1950,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
unsigned int idx, bool idle) unsigned int idx, bool idle)
{ {
struct dm_zone *dzone = NULL; struct dm_zone *dzone = NULL;
struct dm_zone *zone, *last = NULL; struct dm_zone *zone, *maxw_z = NULL;
struct list_head *zone_list; struct list_head *zone_list;
/* If we have cache zones select from the cache zone list */ /* If we have cache zones select from the cache zone list */
...@@ -1961,18 +1962,37 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd, ...@@ -1961,18 +1962,37 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
} else } else
zone_list = &zmd->dev[idx].map_rnd_list; zone_list = &zmd->dev[idx].map_rnd_list;
/*
* Find the buffer zone with the heaviest weight or the first (oldest)
* data zone that can be reclaimed.
*/
list_for_each_entry(zone, zone_list, link) { list_for_each_entry(zone, zone_list, link) {
if (dmz_is_buf(zone)) { if (dmz_is_buf(zone)) {
dzone = zone->bzone; dzone = zone->bzone;
if (dzone->dev->dev_idx != idx) if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
continue;
if (!last) {
last = dzone;
continue; continue;
if (!maxw_z || maxw_z->weight < dzone->weight)
maxw_z = dzone;
} else {
dzone = zone;
if (dmz_lock_zone_reclaim(dzone))
return dzone;
} }
if (last->weight < dzone->weight) }
if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
return maxw_z;
/*
* If we come here, none of the zones inspected could be locked for
* reclaim. Try again, being more aggressive, that is, find the
 * first zone that can be reclaimed regardless of its weight. * first zone that can be reclaimed regardless of its weight.
*/
list_for_each_entry(zone, zone_list, link) {
if (dmz_is_buf(zone)) {
dzone = zone->bzone;
if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
continue; continue;
dzone = last;
} else } else
dzone = zone; dzone = zone;
if (dmz_lock_zone_reclaim(dzone)) if (dmz_lock_zone_reclaim(dzone))
...@@ -2006,7 +2026,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd, ...@@ -2006,7 +2026,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
unsigned int dev_idx, bool idle) unsigned int dev_idx, bool idle)
{ {
struct dm_zone *zone; struct dm_zone *zone = NULL;
/* /*
* Search for a zone candidate to reclaim: 2 cases are possible. * Search for a zone candidate to reclaim: 2 cases are possible.
...@@ -2019,7 +2039,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, ...@@ -2019,7 +2039,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
dmz_lock_map(zmd); dmz_lock_map(zmd);
if (list_empty(&zmd->reserved_seq_zones_list)) if (list_empty(&zmd->reserved_seq_zones_list))
zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx); zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
else if (!zone)
zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle); zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
dmz_unlock_map(zmd); dmz_unlock_map(zmd);
......
...@@ -377,6 +377,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) ...@@ -377,6 +377,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
dmz_metadata_label(zmd), zrc->dev_idx); dmz_metadata_label(zmd), zrc->dev_idx);
return -EBUSY; return -EBUSY;
} }
rzone = dzone;
start = jiffies; start = jiffies;
if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) { if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
...@@ -391,8 +392,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) ...@@ -391,8 +392,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
*/ */
ret = dmz_reclaim_rnd_data(zrc, dzone); ret = dmz_reclaim_rnd_data(zrc, dzone);
} }
rzone = dzone;
} else { } else {
struct dm_zone *bzone = dzone->bzone; struct dm_zone *bzone = dzone->bzone;
sector_t chunk_block = 0; sector_t chunk_block = 0;
...@@ -415,7 +414,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) ...@@ -415,7 +414,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
* be later reclaimed. * be later reclaimed.
*/ */
ret = dmz_reclaim_seq_data(zrc, dzone); ret = dmz_reclaim_seq_data(zrc, dzone);
rzone = dzone;
} }
} }
out: out:
......
...@@ -890,7 +890,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -890,7 +890,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
} }
/* Set target (no write same support) */ /* Set target (no write same support) */
ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9; ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
ti->num_flush_bios = 1; ti->num_flush_bios = 1;
ti->num_discard_bios = 1; ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1; ti->num_write_zeroes_bios = 1;
......
...@@ -1009,6 +1009,7 @@ static void clone_endio(struct bio *bio) ...@@ -1009,6 +1009,7 @@ static void clone_endio(struct bio *bio)
struct dm_io *io = tio->io; struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md; struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io; dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_DISCARD && if (bio_op(bio) == REQ_OP_DISCARD &&
...@@ -1022,6 +1023,18 @@ static void clone_endio(struct bio *bio) ...@@ -1022,6 +1023,18 @@ static void clone_endio(struct bio *bio)
disable_write_zeroes(md); disable_write_zeroes(md);
} }
/*
* For zone-append bios get offset in zone of the written
* sector and add that to the original bio sector pos.
*/
if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
sector_t written_sector = bio->bi_iter.bi_sector;
struct request_queue *q = orig_bio->bi_disk->queue;
u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
orig_bio->bi_iter.bi_sector += written_sector & mask;
}
if (endio) { if (endio) {
int r = endio(tio->ti, bio, &error); int r = endio(tio->ti, bio, &error);
switch (r) { switch (r) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment