Commit a773187e authored by Christoph Hellwig, committed by Martin K. Petersen

scsi: dm: Remove WRITE_SAME support

There are no more end-users of REQ_OP_WRITE_SAME left, so we can start
deleting it.

Link: https://lore.kernel.org/r/20220209082828.2629273-7-hch@lst.de
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 10fa225c
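
For context, the remaining offload path for zeroing a range is REQ_OP_WRITE_ZEROES. A minimal sketch of what a caller uses instead of WRITE SAME (the zero_range() wrapper is hypothetical and not part of this patch; blkdev_issue_zeroout() is the in-tree helper):

	#include <linux/blkdev.h>

	/*
	 * Hypothetical wrapper, for illustration only: zero a sector range.
	 * blkdev_issue_zeroout() issues REQ_OP_WRITE_ZEROES when the device
	 * advertises max_write_zeroes_sectors and otherwise falls back to
	 * writing zero pages, so no REQ_OP_WRITE_SAME path is needed.
	 */
	static int zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
	{
		return blkdev_issue_zeroout(bdev, sector, nr_sects,
					    GFP_KERNEL, 0);
	}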
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -141,7 +141,6 @@ struct mapped_device {
 #define DMF_EMULATE_ZONE_APPEND 9
 
 void disable_discard(struct mapped_device *md);
-void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
 static inline sector_t dm_get_size(struct mapped_device *md)
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2006,7 +2006,6 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
 	 */
 	switch (bio_op(ctx->bio_in)) {
 	case REQ_OP_WRITE:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_WRITE_ZEROES:
 		return true;
 	default:
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -335,7 +335,6 @@ static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->num_secure_erase_bios = 0;
-	ti->num_write_same_bios = 0;
 	ti->num_write_zeroes_bios = 0;
 	return 0;
 bad:
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -304,7 +304,6 @@ static void do_region(int op, int op_flags, unsigned region,
 	unsigned num_bvecs;
 	sector_t remaining = where->count;
 	struct request_queue *q = bdev_get_queue(where->bdev);
-	unsigned short logical_block_size = queue_logical_block_size(q);
 	sector_t num_sectors;
 	unsigned int special_cmd_max_sectors;
 
@@ -315,10 +314,8 @@ static void do_region(int op, int op_flags, unsigned region,
 		special_cmd_max_sectors = q->limits.max_discard_sectors;
 	else if (op == REQ_OP_WRITE_ZEROES)
 		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
-	else if (op == REQ_OP_WRITE_SAME)
-		special_cmd_max_sectors = q->limits.max_write_same_sectors;
-	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
-	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
+	    special_cmd_max_sectors == 0) {
 		atomic_inc(&io->count);
 		dec_count(io, region, BLK_STS_NOTSUPP);
 		return;
@@ -337,9 +334,6 @@ static void do_region(int op, int op_flags, unsigned region,
 		case REQ_OP_WRITE_ZEROES:
 			num_bvecs = 0;
 			break;
-		case REQ_OP_WRITE_SAME:
-			num_bvecs = 1;
-			break;
 		default:
 			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
 						(PAGE_SIZE >> SECTOR_SHIFT)));
@@ -356,18 +350,6 @@ static void do_region(int op, int op_flags, unsigned region,
 			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 			remaining -= num_sectors;
-		} else if (op == REQ_OP_WRITE_SAME) {
-			/*
-			 * WRITE SAME only uses a single page.
-			 */
-			dp->get_page(dp, &page, &len, &offset);
-			bio_add_page(bio, page, logical_block_size, offset);
-			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
-			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
-
-			offset = 0;
-			remaining -= num_sectors;
-			dp->next_page(dp);
 		} else while (remaining) {
 			/*
 			 * Try and add as many pages as possible.
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -60,7 +60,6 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->num_secure_erase_bios = 1;
-	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
 	ti->private = lc;
 	return 0;
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1252,7 +1252,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
-	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
 	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -217,9 +217,6 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 		if (req_op(clone) == REQ_OP_DISCARD &&
 		    !clone->q->limits.max_discard_sectors)
 			disable_discard(tio->md);
-		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
-			 !clone->q->limits.max_write_same_sectors)
-			disable_write_same(tio->md);
 		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
 			 !clone->q->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(tio->md);
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -157,7 +157,6 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ti->num_flush_bios = stripes;
 	ti->num_discard_bios = stripes;
 	ti->num_secure_erase_bios = stripes;
-	ti->num_write_same_bios = stripes;
 	ti->num_write_zeroes_bios = stripes;
 
 	sc->chunk_size = chunk_size;
@@ -284,8 +283,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 	}
 	if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
 	    unlikely(bio_op(bio) == REQ_OP_SECURE_ERASE) ||
-	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES) ||
-	    unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
+	    unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES)) {
 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
 		BUG_ON(target_bio_nr >= sc->stripes);
 		return stripe_map_range(sc, bio, target_bio_nr);
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1822,33 +1822,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return !blk_queue_add_random(q);
 }
 
-static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
-					 sector_t start, sector_t len, void *data)
-{
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return !q->limits.max_write_same_sectors;
-}
-
-static bool dm_table_supports_write_same(struct dm_table *t)
-{
-	struct dm_target *ti;
-	unsigned i;
-
-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
-		ti = dm_table_get_target(t, i);
-
-		if (!ti->num_write_same_bios)
-			return false;
-
-		if (!ti->type->iterate_devices ||
-		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
-			return false;
-	}
-
-	return true;
-}
-
 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev,
 					   sector_t start, sector_t len, void *data)
 {
@@ -2027,8 +2000,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 
-	if (!dm_table_supports_write_same(t))
-		q->limits.max_write_same_sectors = 0;
 	if (!dm_table_supports_write_zeroes(t))
 		q->limits.max_write_zeroes_sectors = 0;
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -130,7 +130,6 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
 	switch (bio_op(bio)) {
 	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_WRITE:
 		return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
 	default:
 		return false;
@@ -390,7 +389,6 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
 	case REQ_OP_ZONE_FINISH:
 		return true;
 	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_WRITE:
 		/* Writes must be aligned to the zone write pointer */
 		if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)
@@ -446,7 +444,6 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
 			  blk_queue_zone_sectors(md->queue));
 		return BLK_STS_OK;
 	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_WRITE:
 		WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
 		return BLK_STS_OK;
@@ -503,7 +500,6 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
 		return false;
 	switch (bio_op(orig_bio)) {
 	case REQ_OP_WRITE_ZEROES:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_WRITE:
 	case REQ_OP_ZONE_RESET:
 	case REQ_OP_ZONE_FINISH:
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -855,14 +855,6 @@ void disable_discard(struct mapped_device *md)
 	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
 }
 
-void disable_write_same(struct mapped_device *md)
-{
-	struct queue_limits *limits = dm_get_queue_limits(md);
-
-	/* device doesn't really support WRITE SAME, disable it */
-	limits->max_write_same_sectors = 0;
-}
-
 void disable_write_zeroes(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);
@@ -889,9 +881,6 @@ static void clone_endio(struct bio *bio)
 		if (bio_op(bio) == REQ_OP_DISCARD &&
 		    !q->limits.max_discard_sectors)
 			disable_discard(md);
-		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-			 !q->limits.max_write_same_sectors)
-			disable_write_same(md);
 		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
 			 !q->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
@@ -1370,7 +1359,6 @@ static bool is_abnormal_io(struct bio *bio)
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-	case REQ_OP_WRITE_SAME:
 	case REQ_OP_WRITE_ZEROES:
 		r = true;
 		break;
@@ -1392,9 +1380,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 	case REQ_OP_SECURE_ERASE:
 		num_bios = ti->num_secure_erase_bios;
 		break;
-	case REQ_OP_WRITE_SAME:
-		num_bios = ti->num_write_same_bios;
-		break;
 	case REQ_OP_WRITE_ZEROES:
 		num_bios = ti->num_write_zeroes_bios;
 		break;
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -316,12 +316,6 @@ struct dm_target {
 	 */
 	unsigned num_secure_erase_bios;
 
-	/*
-	 * The number of WRITE SAME bios that will be submitted to the target.
-	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
-	 */
-	unsigned num_write_same_bios;
-
 	/*
 	 * The number of WRITE ZEROES bios that will be submitted to the target.
 	 * The bio number can be accessed with dm_bio_get_target_bio_nr.