Commit 6080ad3a authored by Linus Torvalds

Merge tag 'for-linus-20181026' of git://git.kernel.dk/linux-block

Pull more block layer updates from Jens Axboe:

 - Set of patches improving support for zoned devices. This was ready
   before the merge window, but I was late in picking it up and hence it
   missed the original pull request (Damien, Christoph). A usage sketch
   of the new zone ioctls follows the shortlog below.

 - libata no link power management quirk addition for a Samsung drive
   (Diego Viola)

 - Fix for a performance regression in BFQ that went into this merge
   window (Federico Motta)

 - Fix for a missing dma mask setting return value check (Gustavo)

 - Typo in the gdrom queue failure case (me)

 - NULL pointer deref fix for xen-blkfront (Vasilis Liaskovitis)

 - Fixing the get_rq trace point placement in blk-mq (Xiaoguang Wang)

 - Removal of a set-but-not-read variable in cdrom (zhong jiang)

* tag 'for-linus-20181026' of git://git.kernel.dk/linux-block:
  libata: Apply NOLPM quirk for SAMSUNG MZ7TD256HAFV-000L9
  block, bfq: fix asymmetric scenarios detection
  gdrom: fix mistake in assignment of error
  blk-mq: place trace_block_getrq() in correct place
  block: Introduce blk_revalidate_disk_zones()
  block: add a report_zones method
  block: Expose queue nr_zones in sysfs
  block: Improve zone reset execution
  block: Introduce BLKGETNRZONES ioctl
  block: Introduce BLKGETZONESZ ioctl
  block: Limit allocation of zone descriptors for report zones
  block: Introduce blkdev_nr_zones() helper
  scsi: sd_zbc: Fix sd_zbc_check_zones() error checks
  scsi: sd_zbc: Reduce boot device scan and revalidate time
  scsi: sd_zbc: Rearrange code
  cdrom: remove set but not used variable 'tocuse'
  skd: fix unchecked return values
  xen/blkfront: avoid NULL blkfront_info dereference on device removal
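
As a usage note (not part of this merge): the two new zone ioctls, BLKGETZONESZ and BLKGETNRZONES, can be exercised from userspace roughly as in the following minimal C sketch. It assumes a kernel carrying these patches; error handling is deliberately simple.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	unsigned int zone_sectors = 0, nr_zones = 0;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <blockdev>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Zone size in 512 B sectors; reads 0 if the device is not zoned. */
	if (ioctl(fd, BLKGETZONESZ, &zone_sectors) < 0)
		perror("BLKGETZONESZ");

	/* Total number of zones of the device. */
	if (ioctl(fd, BLKGETNRZONES, &nr_zones) < 0)
		perror("BLKGETNRZONES");

	printf("%u zones of %u sectors\n", nr_zones, zone_sectors);
	close(fd);
	return 0;
}

The same zone count is also exposed read-only in sysfs as queue/nr_zones, shown in the blk-sysfs.c hunk further down.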
parents b27186ab a435ab4f
......@@ -792,24 +792,18 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
* queue, remove the entity from its old weight counter (if
* there is a counter associated with the entity).
*/
if (prev_weight != new_weight) {
if (bfqq) {
root = &bfqd->queue_weights_tree;
__bfq_weights_tree_remove(bfqd, bfqq, root);
} else
bfqd->num_active_groups--;
if (prev_weight != new_weight && bfqq) {
root = &bfqd->queue_weights_tree;
__bfq_weights_tree_remove(bfqd, bfqq, root);
}
entity->weight = new_weight;
/*
* Add the entity, if it is not a weight-raised queue,
* to the counter associated with its new weight.
*/
if (prev_weight != new_weight) {
if (bfqq && bfqq->wr_coeff == 1) {
/* If we get here, root has been initialized. */
bfq_weights_tree_add(bfqd, bfqq, root);
} else
bfqd->num_active_groups++;
if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
/* If we get here, root has been initialized. */
bfq_weights_tree_add(bfqd, bfqq, root);
}
new_st->wsum += entity->weight;
......
......@@ -2300,7 +2300,6 @@ generic_make_request_checks(struct bio *bio)
if (!q->limits.max_write_same_sectors)
goto not_supported;
break;
case REQ_OP_ZONE_REPORT:
case REQ_OP_ZONE_RESET:
if (!blk_queue_is_zoned(q))
goto not_supported;
......
......@@ -10,8 +10,7 @@
#include "blk.h"
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
gfp_t gfp)
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
struct bio *new = bio_alloc(gfp, nr_pages);
......@@ -63,7 +62,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
end_sect = sector + req_sects;
bio = next_bio(bio, 0, gfp_mask);
bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, op, 0);
......@@ -165,7 +164,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
max_write_same_sectors = UINT_MAX >> 9;
while (nr_sects) {
bio = next_bio(bio, 1, gfp_mask);
bio = blk_next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio->bi_vcnt = 1;
......@@ -241,7 +240,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
return -EOPNOTSUPP;
while (nr_sects) {
bio = next_bio(bio, 0, gfp_mask);
bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE_ZEROES;
......@@ -292,8 +291,8 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
return -EPERM;
while (nr_sects != 0) {
bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
gfp_mask);
bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
......
......@@ -283,7 +283,6 @@ static const char *const op_name[] = {
REQ_OP_NAME(WRITE),
REQ_OP_NAME(FLUSH),
REQ_OP_NAME(DISCARD),
REQ_OP_NAME(ZONE_REPORT),
REQ_OP_NAME(SECURE_ERASE),
REQ_OP_NAME(ZONE_RESET),
REQ_OP_NAME(WRITE_SAME),
......
......@@ -1850,8 +1850,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq_qos_throttle(q, bio, NULL);
trace_block_getrq(q, bio, bio->bi_opf);
rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
......@@ -1860,6 +1858,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
trace_block_getrq(q, bio, bio->bi_opf);
rq_qos_track(q, rq, bio);
cookie = request_to_qc_t(data.hctx, rq);
......
......@@ -300,6 +300,11 @@ static ssize_t queue_zoned_show(struct request_queue *q, char *page)
}
}
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
return queue_var_show(blk_queue_nr_zones(q), page);
}
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show((blk_queue_nomerges(q) << 1) |
......@@ -637,6 +642,11 @@ static struct queue_sysfs_entry queue_zoned_entry = {
.show = queue_zoned_show,
};
static struct queue_sysfs_entry queue_nr_zones_entry = {
.attr = {.name = "nr_zones", .mode = 0444 },
.show = queue_nr_zones_show,
};
static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = 0644 },
.show = queue_nomerges_show,
......@@ -727,6 +737,7 @@ static struct attribute *default_attrs[] = {
&queue_write_zeroes_max_entry.attr,
&queue_nonrot_entry.attr,
&queue_zoned_entry.attr,
&queue_nr_zones_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
......@@ -841,6 +852,8 @@ static void __blk_release_queue(struct work_struct *work)
if (q->queue_tags)
__blk_queue_free_tags(q);
blk_queue_free_zone_bitmaps(q);
if (!q->mq_ops) {
if (q->exit_rq_fn)
q->exit_rq_fn(q, q->fq->flush_rq);
......
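
A hedged userspace sketch of reading the new nr_zones attribute added above (the nullb0 device name is hypothetical; substitute any /sys/block entry):

#include <stdio.h>

int main(void)
{
	unsigned int nr_zones = 0;
	/* Hypothetical zoned device; any /sys/block entry works. */
	FILE *f = fopen("/sys/block/nullb0/queue/nr_zones", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &nr_zones) == 1)
		printf("nr_zones = %u\n", nr_zones);
	fclose(f);
	return 0;
}

For non-zoned devices the file reads 0, matching the blk_queue_nr_zones() helper added in blkdev.h.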
......@@ -488,4 +488,12 @@ extern int blk_iolatency_init(struct request_queue *q);
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif
#endif /* BLK_INTERNAL_H */
......@@ -532,6 +532,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
case BLKRESETZONE:
return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
case BLKGETZONESZ:
return put_uint(arg, bdev_zone_sectors(bdev));
case BLKGETNRZONES:
return put_uint(arg, blkdev_nr_zones(bdev));
case HDIO_GETGEO:
return blkdev_getgeo(bdev, argp);
case BLKRAGET:
......
......@@ -4553,6 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
......
......@@ -87,7 +87,9 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
int null_zone_report(struct gendisk *disk, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask);
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors);
void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
......@@ -97,10 +99,11 @@ static inline int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
static inline blk_status_t null_zone_report(struct nullb *nullb,
struct bio *bio)
static inline int null_zone_report(struct gendisk *disk, sector_t sector,
struct blk_zone *zones,
unsigned int *nr_zones, gfp_t gfp_mask)
{
return BLK_STS_NOTSUPP;
return -EOPNOTSUPP;
}
static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
......
......@@ -1129,34 +1129,12 @@ static void null_restart_queue_async(struct nullb *nullb)
blk_mq_start_stopped_hw_queues(q, true);
}
static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
if (dev->queue_mode == NULL_Q_BIO) {
if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
cmd->error = null_zone_report(nullb, cmd->bio);
return true;
}
} else {
if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
cmd->error = null_zone_report(nullb, cmd->rq->bio);
return true;
}
}
return false;
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
int err = 0;
if (cmd_report_zone(nullb, cmd))
goto out;
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
......@@ -1443,6 +1421,7 @@ static const struct block_device_operations null_fops = {
.owner = THIS_MODULE,
.open = null_open,
.release = null_release,
.report_zones = null_zone_report,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
......@@ -1549,6 +1528,13 @@ static int null_gendisk_register(struct nullb *nullb)
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
if (nullb->dev->zoned) {
int ret = blk_revalidate_disk_zones(disk);
if (ret != 0)
return ret;
}
add_disk(disk);
return 0;
}
......
......@@ -48,54 +48,27 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
unsigned int zno, unsigned int nr_zones)
int null_zone_report(struct gendisk *disk, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask)
{
struct blk_zone_report_hdr *hdr = NULL;
struct bio_vec bvec;
struct bvec_iter iter;
void *addr;
unsigned int zones_to_cpy;
bio_for_each_segment(bvec, bio, iter) {
addr = kmap_atomic(bvec.bv_page);
zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
if (!hdr) {
hdr = (struct blk_zone_report_hdr *)addr;
hdr->nr_zones = nr_zones;
zones_to_cpy--;
addr += sizeof(struct blk_zone_report_hdr);
}
zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
memcpy(addr, &dev->zones[zno],
zones_to_cpy * sizeof(struct blk_zone));
kunmap_atomic(addr);
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
unsigned int zno, nrz = 0;
nr_zones -= zones_to_cpy;
zno += zones_to_cpy;
if (!dev->zoned)
/* Not a zoned null device */
return -EOPNOTSUPP;
if (!nr_zones)
break;
zno = null_zone_no(dev, sector);
if (zno < dev->nr_zones) {
nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
}
}
blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
{
struct nullb_device *dev = nullb->dev;
unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
unsigned int nr_zones = dev->nr_zones - zno;
unsigned int max_zones;
*nr_zones = nrz;
max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
nr_zones = min_t(unsigned int, nr_zones, max_zones);
null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
return BLK_STS_OK;
return 0;
}
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
......
......@@ -3175,7 +3175,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
......@@ -3364,7 +3364,7 @@ static int skd_pci_resume(struct pci_dev *pdev)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
......
......@@ -2493,6 +2493,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
if (!info)
return 0;
blkif_free(info, 0);
mutex_lock(&info->mutex);
......
......@@ -327,15 +327,15 @@ static int get_entry_track(int track)
static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
struct cdrom_multisession *ms_info)
{
int fentry, lentry, track, data, tocuse, err;
int fentry, lentry, track, data, err;
if (!gd.toc)
return -ENOMEM;
tocuse = 1;
/* Check if GD-ROM */
err = gdrom_readtoc_cmd(gd.toc, 1);
/* Not a GD-ROM so check if standard CD-ROM */
if (err) {
tocuse = 0;
err = gdrom_readtoc_cmd(gd.toc, 0);
if (err) {
pr_info("Could not get CD table of contents\n");
......@@ -794,7 +794,7 @@ static int probe_gdrom(struct platform_device *devptr)
gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (IS_ERR(gd.gdrom_rq)) {
rc = PTR_ERR(gd.gdrom_rq);
err = PTR_ERR(gd.gdrom_rq);
gd.gdrom_rq = NULL;
goto probe_fail_requestq;
}
......
......@@ -315,10 +315,6 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
if (bio_op(bio) == REQ_OP_ZONE_RESET)
goto map_bio;
/* We need to remap reported zones, so remember the BIO iter */
if (bio_op(bio) == REQ_OP_ZONE_REPORT)
goto map_bio;
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
......@@ -380,11 +376,6 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
if (bio_op(bio) == REQ_OP_ZONE_RESET)
return DM_ENDIO_DONE;
if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
dm_remap_zone_report(ti, bio, fc->start);
return DM_ENDIO_DONE;
}
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
......@@ -457,6 +448,26 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int flakey_report_zones(struct dm_target *ti, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask)
{
struct flakey_c *fc = ti->private;
int ret;
/* Do report and remap it */
ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
zones, nr_zones, gfp_mask);
if (ret != 0)
return ret;
if (*nr_zones)
dm_remap_zone_report(ti, fc->start, zones, nr_zones);
return 0;
}
#endif
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
struct flakey_c *fc = ti->private;
......@@ -469,6 +480,7 @@ static struct target_type flakey_target = {
.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
.report_zones = flakey_report_zones,
#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
......
......@@ -102,19 +102,6 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int linear_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
struct linear_c *lc = ti->private;
if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
dm_remap_zone_report(ti, bio, lc->start);
return DM_ENDIO_DONE;
}
#endif
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
......@@ -148,6 +135,26 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int linear_report_zones(struct dm_target *ti, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask)
{
struct linear_c *lc = (struct linear_c *) ti->private;
int ret;
/* Do report and remap it */
ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
zones, nr_zones, gfp_mask);
if (ret != 0)
return ret;
if (*nr_zones)
dm_remap_zone_report(ti, lc->start, zones, nr_zones);
return 0;
}
#endif
static int linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
......@@ -211,8 +218,8 @@ static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
.report_zones = linear_report_zones,
#else
.features = DM_TARGET_PASSES_INTEGRITY,
#endif
......
......@@ -1937,6 +1937,16 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
/*
* For a zoned target, the number of zones should be updated for the
* correct value to be exposed in sysfs queue/nr_zones. For a BIO based
* target, this is all that is needed. For a request based target, the
* queue zone bitmaps must also be updated.
* Use blk_revalidate_disk_zones() to handle this.
*/
if (blk_queue_is_zoned(q))
blk_revalidate_disk_zones(t->md->disk);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
......
......@@ -702,8 +702,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
>> dev->zone_nr_sectors_shift;
dev->nr_zones = blkdev_nr_zones(dev->bdev);
dmz->dev = dev;
......
......@@ -458,6 +458,57 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask)
{
#ifdef CONFIG_BLK_DEV_ZONED
struct mapped_device *md = disk->private_data;
struct dm_target *tgt;
struct dm_table *map;
int srcu_idx, ret;
if (dm_suspended_md(md))
return -EAGAIN;
map = dm_get_live_table(md, &srcu_idx);
if (!map)
return -EIO;
tgt = dm_table_find_target(map, sector);
if (!dm_target_is_valid(tgt)) {
ret = -EIO;
goto out;
}
/*
* If we are executing this, we already know that the block device
* is a zoned device and so each target should have support for that
* type of drive. A missing report_zones method means that the target
* driver has a problem.
*/
if (WARN_ON(!tgt->type->report_zones)) {
ret = -EIO;
goto out;
}
/*
* blkdev_report_zones() will loop and call this again to cover all the
* zones of the target, eventually moving on to the next target.
* So there is no need to loop here trying to fill the entire array
* of zones.
*/
ret = tgt->type->report_zones(tgt, sector, zones,
nr_zones, gfp_mask);
out:
dm_put_live_table(md, srcu_idx);
return ret;
#else
return -ENOTSUPP;
#endif
}
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
__acquires(md->io_barrier)
......@@ -1155,93 +1206,49 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
* The zone descriptors obtained with a zone report indicate zone positions
* within the target backing device, regardless of whether that device is a partition
* and regardless of the target mapping start sector on the device or partition.
* The zone descriptors start sector and write pointer position must be adjusted
* to match their relative position within the dm device.
* A target may call dm_remap_zone_report() after completion of a
* REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
* backing device.
* The zone descriptors obtained with a zone report indicate
* zone positions within the underlying device of the target. The zone
* descriptors must be remapped to match their position within the dm device.
* The caller target should obtain the zone information using
* blkdev_report_zones() to ensure that remapping for partition offset is
* already handled.
*/
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
struct blk_zone *zones, unsigned int *nr_zones)
{
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct bio *report_bio = tio->io->orig_bio;
struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
unsigned int nr_rep = 0;
unsigned int ofst;
sector_t part_offset;
struct bio_vec bvec;
struct bvec_iter iter;
void *addr;
if (bio->bi_status)
return;
/*
* bio sector was incremented by the request size on completion. Taking
* into account the original request sector, the target start offset on
* the backing device and the target mapping offset (ti->begin), the
* start sector of the backing device. The partition offset is always 0
* if the target uses a whole device.
*/
part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
unsigned int nrz = *nr_zones;
int i;
/*
* Remap the start sector of the reported zones. For sequential zones,
* also remap the write pointer position.
* Remap the start sector and write pointer position of the zones in
* the array. Since we may have obtained from the target underlying
* device more zones than the target size, also adjust the number
* of zones.
*/
bio_for_each_segment(bvec, report_bio, iter) {
addr = kmap_atomic(bvec.bv_page);
/* Remember the report header in the first page */
if (!hdr) {
hdr = addr;
ofst = sizeof(struct blk_zone_report_hdr);
} else
ofst = 0;
/* Set zones start sector */
while (hdr->nr_zones && ofst < bvec.bv_len) {
zone = addr + ofst;
zone->start -= part_offset;
if (zone->start >= start + ti->len) {
hdr->nr_zones = 0;
break;
}
zone->start = zone->start + ti->begin - start;
if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
if (zone->cond == BLK_ZONE_COND_FULL)
zone->wp = zone->start + zone->len;
else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->wp = zone->start;
else
zone->wp = zone->wp + ti->begin - start - part_offset;
}
ofst += sizeof(struct blk_zone);
hdr->nr_zones--;
nr_rep++;
for (i = 0; i < nrz; i++) {
zone = zones + i;
if (zone->start >= start + ti->len) {
memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
break;
}
if (addr != hdr)
kunmap_atomic(addr);
zone->start = zone->start + ti->begin - start;
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
continue;
if (!hdr->nr_zones)
break;
}
if (hdr) {
hdr->nr_zones = nr_rep;
kunmap_atomic(hdr);
if (zone->cond == BLK_ZONE_COND_FULL)
zone->wp = zone->start + zone->len;
else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->wp = zone->start;
else
zone->wp = zone->wp + ti->begin - start;
}
bio_advance(report_bio, report_bio->bi_iter.bi_size);
*nr_zones = i;
#else /* !CONFIG_BLK_DEV_ZONED */
bio->bi_status = BLK_STS_NOTSUPP;
*nr_zones = 0;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
......@@ -1327,8 +1334,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return r;
}
if (bio_op(bio) != REQ_OP_ZONE_REPORT)
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
if (unlikely(bio_integrity(bio) != NULL))
......@@ -1541,7 +1547,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
struct bio *bio = ci->bio;
struct dm_target *ti;
unsigned len;
int r;
......@@ -1553,11 +1558,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (unlikely(__process_abnormal_io(ci, ti, &r)))
return r;
if (bio_op(bio) == REQ_OP_ZONE_REPORT)
len = ci->sector_count;
else
len = min_t(sector_t, max_io_len(ci->sector, ti),
ci->sector_count);
len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
if (r < 0)
......@@ -1616,9 +1617,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* We take a clone of the original to store in
* ci.io->orig_bio to be used by end_io_acct() and
* for dec_pending to use for completion handling.
* As this path is not used for REQ_OP_ZONE_REPORT,
* the usage of io->orig_bio in dm_remap_zone_report()
* won't be affected by this reassignment.
*/
struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
GFP_NOIO, &md->queue->bio_split);
......@@ -3167,6 +3165,7 @@ static const struct block_device_operations dm_blk_dops = {
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
.report_zones = dm_blk_report_zones,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
......
......@@ -1272,8 +1272,6 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_READ:
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
case REQ_OP_ZONE_REPORT:
return sd_zbc_setup_report_cmnd(cmd);
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
......@@ -1802,6 +1800,7 @@ static const struct block_device_operations sd_fops = {
.check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
.pr_ops = &sd_pr_ops,
};
......@@ -1953,16 +1952,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
case REQ_OP_ZONE_REPORT:
if (!result) {
good_bytes = scsi_bufflen(SCpnt)
- scsi_get_resid(SCpnt);
scsi_set_resid(SCpnt, 0);
} else {
good_bytes = 0;
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
default:
/*
* In case of bogus fw or device, we could end up having
......@@ -3425,8 +3414,6 @@ static int sd_remove(struct device *dev)
del_gendisk(sdkp->disk);
sd_shutdown(dev);
sd_zbc_remove(sdkp);
free_opal_dev(sdkp->opal_dev);
blk_register_region(devt, SD_MINORS, NULL,
......
......@@ -76,7 +76,6 @@ struct scsi_disk {
#ifdef CONFIG_BLK_DEV_ZONED
u32 nr_zones;
u32 zone_blocks;
u32 zone_shift;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
......@@ -271,12 +270,13 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_remove(struct scsi_disk *sdkp);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask);
#else /* CONFIG_BLK_DEV_ZONED */
......@@ -286,15 +286,8 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
return 0;
}
static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
return BLKPREP_INVALID;
}
static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
return BLKPREP_INVALID;
......@@ -304,6 +297,8 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
unsigned int good_bytes,
struct scsi_sense_hdr *sshdr) {}
#define sd_zbc_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* _SCSI_DISK_H */
......@@ -283,8 +283,6 @@ enum req_opf {
REQ_OP_FLUSH = 2,
/* discard sectors */
REQ_OP_DISCARD = 3,
/* get zone information */
REQ_OP_ZONE_REPORT = 4,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = 5,
/* reset a zone write pointer */
......
......@@ -396,16 +396,13 @@ struct queue_limits {
#ifdef CONFIG_BLK_DEV_ZONED
struct blk_zone_report_hdr {
unsigned int nr_zones;
u8 padding[60];
};
extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
sector_t sector, struct blk_zone *zones,
unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
sector_t nr_sectors, gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
......@@ -414,6 +411,16 @@ extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
{
return 0;
}
static inline int blk_revalidate_disk_zones(struct gendisk *disk)
{
return 0;
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
fmode_t mode, unsigned int cmd,
unsigned long arg)
......@@ -806,6 +813,11 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
}
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
......@@ -821,6 +833,11 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
......@@ -1852,6 +1869,9 @@ struct block_device_operations {
int (*getgeo)(struct block_device *, struct hd_geometry *);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask);
struct module *owner;
const struct pr_ops *pr_ops;
};
......
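
To illustrate the reworked interface as a whole, here is a hedged in-kernel sketch (not from this merge) that walks a device's zones with the blkdev_nr_zones() and blkdev_report_zones() helpers declared above. The function name and the batch size of 16 are arbitrary; <linux/blkdev.h> and <linux/slab.h> are assumed.

static int example_walk_zones(struct block_device *bdev)
{
	unsigned int remaining = blkdev_nr_zones(bdev);
	unsigned int i, nr_zones;
	struct blk_zone *zones;
	sector_t sector = 0;
	int ret = 0;

	zones = kcalloc(16, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	while (remaining) {
		/* On return, nr_zones holds the number of entries filled. */
		nr_zones = min_t(unsigned int, remaining, 16);
		ret = blkdev_report_zones(bdev, sector, zones, &nr_zones,
					  GFP_KERNEL);
		if (ret || !nr_zones)
			break;
		for (i = 0; i < nr_zones; i++)
			pr_info("zone %llu: len %llu\n",
				(unsigned long long)zones[i].start,
				(unsigned long long)zones[i].len);
		/* Resume from the sector following the last reported zone. */
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
		remaining -= nr_zones;
	}

	kfree(zones);
	return ret;
}

Compared with the removed REQ_OP_ZONE_REPORT path, the caller never touches BIO pages or a blk_zone_report_hdr: the helper fills a plain array and updates *nr_zones to the number of entries written.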
......@@ -92,6 +92,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
struct blk_zone *zones,
unsigned int *nr_zones,
gfp_t gfp_mask);
/*
* These iteration functions are typically used to check (and combine)
* properties of underlying devices.
......@@ -180,6 +185,9 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
#ifdef CONFIG_BLK_DEV_ZONED
dm_report_zones_fn report_zones;
#endif
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
......@@ -420,8 +428,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
sector_t start);
void dm_remap_zone_report(struct dm_target *ti, sector_t start,
struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
......
......@@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ REQ_OP_WRITE, "WRITE" }, \
{ REQ_OP_FLUSH, "FLUSH" }, \
{ REQ_OP_DISCARD, "DISCARD" }, \
{ REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \
{ REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \
{ REQ_OP_ZONE_RESET, "ZONE_RESET" }, \
{ REQ_OP_WRITE_SAME, "WRITE_SAME" }, \
......
......@@ -137,8 +137,11 @@ struct blk_zone_range {
* sector specified in the report request structure.
* @BLKRESETZONE: Reset the write pointer of the zones in the specified
* sector range. The sector range must be zone aligned.
* @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
#define BLKGETZONESZ _IOW(0x12, 132, __u32)
#define BLKGETNRZONES _IOW(0x12, 133, __u32)
#endif /* _UAPI_BLKZONED_H */