Commit 59749c2d authored by Linus Torvalds

Merge tag 'for-linus-20181115' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Discard loop fix, caused by integer overflow (Dave)

 - Blacklist of Samsung drive that hangs with power management (Diego)

 - Copy bio priority when cloning it (Hannes)

 - Fix race condition exposed in floppy (me)

 - Fix SCSI queue cleanup regression. While elusive, it caused oopses in
   queue running (Ming)

 - Fix bad string copy in kyber tracing (Omar)

* tag 'for-linus-20181115' of git://git.kernel.dk/linux-block:
  SCSI: fix queue cleanup race before queue initialization is done
  block: fix 32 bit overflow in __blkdev_issue_discard()
  libata: blacklist SAMSUNG MZ7TD256HAFV-000L9 SSD
  block: copy ioprio in __bio_clone_fast() and bounce
  kyber: fix wrong strlcpy() size in trace_kyber_latency()
  floppy: fix race condition in __floppy_read_block_0()
parents 9b5f361a 8dc765d4
@@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
         if (bio_flagged(bio_src, BIO_THROTTLED))
                 bio_set_flag(bio, BIO_THROTTLED);
         bio->bi_opf = bio_src->bi_opf;
+        bio->bi_ioprio = bio_src->bi_ioprio;
         bio->bi_write_hint = bio_src->bi_write_hint;
         bio->bi_iter = bio_src->bi_iter;
         bio->bi_io_vec = bio_src->bi_io_vec;
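The one added line above is the whole fix: a fast clone shares bio_src's data pages but gets a fresh header, so any per-bio attribute that is not copied explicitly is silently lost. A minimal user-space sketch of that pitfall (the struct and field names are made up for illustration, not the kernel's struct bio):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for a few bio fields; not the kernel definitions. */
struct fake_bio {
        unsigned int opf;     /* operation flags */
        unsigned int ioprio;  /* I/O priority */
        void *io_vec;         /* shared data pages */
};

/* A "fast clone" shares the data but must copy every per-request attribute
 * explicitly; anything left out is silently zeroed in the clone. */
static void fake_bio_clone_fast(struct fake_bio *clone, const struct fake_bio *src)
{
        memset(clone, 0, sizeof(*clone));
        clone->opf = src->opf;
        clone->ioprio = src->ioprio;  /* the attribute the real fix starts copying */
        clone->io_vec = src->io_vec;  /* data is shared, not duplicated */
}

int main(void)
{
        struct fake_bio src = { .opf = 1, .ioprio = 5, .io_vec = (void *)&src };
        struct fake_bio clone;

        fake_bio_clone_fast(&clone, &src);
        printf("clone ioprio = %u (expected %u)\n", clone.ioprio, src.ioprio);
        return 0;
}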
@@ -798,9 +798,8 @@ void blk_cleanup_queue(struct request_queue *q)
          * dispatch may still be in-progress since we dispatch requests
          * from more than one contexts.
          *
-         * No need to quiesce queue if it isn't initialized yet since
-         * blk_freeze_queue() should be enough for cases of passthrough
-         * request.
+         * We rely on driver to deal with the race in case that queue
+         * initialization isn't done.
          */
         if (q->mq_ops && blk_queue_init_done(q))
                 blk_mq_quiesce_queue(q);
@@ -55,9 +55,11 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 return -EINVAL;
 
         while (nr_sects) {
-                unsigned int req_sects = min_t(unsigned int, nr_sects,
+                sector_t req_sects = min_t(sector_t, nr_sects,
                                 bio_allowed_max_sectors(q));
 
+                WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
+
                 bio = blk_next_bio(bio, 0, gfp_mask);
                 bio->bi_iter.bi_sector = sector;
                 bio_set_dev(bio, bdev);
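The type change matters because min_t() casts both operands to the named type before comparing, so a 64-bit sector count of 2^32 or more is truncated first; the resulting req_sects can be 0, and the discard loop then never advances, which is the hang described in the pull summary. A stand-alone sketch of the truncation (min_t_uint is a simplified stand-in for the kernel macro, and the per-bio cap value is arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's min_t(unsigned int, ...): both
 * arguments are cast to the named type *before* the comparison. */
#define min_t_uint(a, b) \
        ((unsigned int)(a) < (unsigned int)(b) ? (unsigned int)(a) : (unsigned int)(b))

int main(void)
{
        uint64_t nr_sects = 1ULL << 32;     /* a 2 TiB discard in 512-byte sectors */
        unsigned int max_sects = 0x3fffff;  /* arbitrary per-bio cap for the demo */

        /* The cast truncates nr_sects to 0, so the "minimum" is 0 and a loop
         * that advances by req_sects sectors per iteration never terminates. */
        unsigned int req_sects = min_t_uint(nr_sects, max_sects);
        printf("truncated req_sects = %u\n", req_sects);   /* prints 0 */

        /* Comparing in 64 bits, as the fix does, yields the intended cap. */
        uint64_t fixed = nr_sects < max_sects ? nr_sects : max_sects;
        printf("64-bit req_sects = %llu\n", (unsigned long long)fixed);
        return 0;
}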
@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
                 return NULL;
         bio->bi_disk = bio_src->bi_disk;
         bio->bi_opf = bio_src->bi_opf;
+        bio->bi_ioprio = bio_src->bi_ioprio;
         bio->bi_write_hint = bio_src->bi_write_hint;
         bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
         bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         /* These specific Samsung models/firmware-revs do not handle LPM well */
         { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
         { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
-        { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
+        { "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
 
         /* devices that don't properly handle queued TRIM commands */
         { "Micron_M500IT_*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
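Replacing the firmware string with NULL widens the quirk from the one reported revision to every firmware revision of that model, since an empty revision field in the blacklist matches anything. A rough user-space sketch of that matching rule (the table, flag value, and lookup below are illustrative only; libata's real matcher also supports glob patterns):

#include <stdio.h>
#include <string.h>

struct quirk_entry {
        const char *model;   /* exact model string in this demo */
        const char *fw_rev;  /* NULL means "any firmware revision" */
        unsigned int flags;
};

#define HORKAGE_NOLPM 0x1

static const struct quirk_entry quirks[] = {
        { "SAMSUNG MZ7TD256HAFV-000L9", NULL, HORKAGE_NOLPM },  /* all firmware revs */
};

static unsigned int lookup_quirks(const char *model, const char *fw_rev)
{
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                if (strcmp(quirks[i].model, model) != 0)
                        continue;
                /* A NULL firmware field matches every revision. */
                if (quirks[i].fw_rev && strcmp(quirks[i].fw_rev, fw_rev) != 0)
                        continue;
                return quirks[i].flags;
        }
        return 0;
}

int main(void)
{
        printf("DXT02L5Q flags: %u\n",
               lookup_quirks("SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q"));
        printf("other rev flags: %u\n",
               lookup_quirks("SAMSUNG MZ7TD256HAFV-000L9", "DXT04L5Q"));
        return 0;
}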
@@ -4148,10 +4148,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
         bio.bi_end_io = floppy_rb0_cb;
         bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 
+        init_completion(&cbdata.complete);
+
         submit_bio(&bio);
         process_fd_request();
 
-        init_completion(&cbdata.complete);
         wait_for_completion(&cbdata.complete);
 
         __free_page(page);
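The floppy fix is the generic ordering rule that a synchronization object must be fully initialized before the work that signals it is published: once submit_bio() runs, the bio's end_io callback can fire from interrupt context at any moment, so init_completion() has to come first. A user-space analogue using pthreads (this tiny completion type is an illustration, not the kernel's):

#include <pthread.h>
#include <stdio.h>

/* A minimal user-space "completion": a flag guarded by a mutex/condvar. */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static void init_completion(struct completion *c)
{
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
}

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static void *io_worker(void *arg)
{
        complete(arg);  /* may run before the submitter's next statement */
        return NULL;
}

int main(void)
{
        struct completion c;
        pthread_t t;

        /* Correct order: initialize first, then hand the object to the worker.
         * Initializing after pthread_create() would race with complete(),
         * which is the shape of the bug fixed in __floppy_read_block_0(). */
        init_completion(&c);
        pthread_create(&t, NULL, io_worker, &c);

        wait_for_completion(&c);
        pthread_join(t, NULL);
        printf("completed\n");
        return 0;
}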
@@ -697,6 +697,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
                  */
                 scsi_mq_uninit_cmd(cmd);
 
+                /*
+                 * queue is still alive, so grab the ref for preventing it
+                 * from being cleaned up during running queue.
+                 */
+                percpu_ref_get(&q->q_usage_counter);
+
                 __blk_mq_end_request(req, error);
 
                 if (scsi_target(sdev)->single_lun ||
@@ -704,6 +710,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
                         kblockd_schedule_work(&sdev->requeue_work);
                 else
                         blk_mq_run_hw_queues(q, true);
+
+                percpu_ref_put(&q->q_usage_counter);
         } else {
                 unsigned long flags;
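The SCSI change is the usual pattern of pinning an object across a call that may indirectly release it: completing the request can drop the reference that was keeping the queue alive, so an extra reference is taken before __blk_mq_end_request() and dropped only after the queue has been run. A minimal user-space sketch of that pattern with a plain atomic counter (the names are illustrative, not the percpu_ref API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
        atomic_int usage;  /* stand-in for a queue usage counter */
};

static void queue_get(struct queue *q) { atomic_fetch_add(&q->usage, 1); }

static void queue_put(struct queue *q)
{
        if (atomic_fetch_sub(&q->usage, 1) == 1) {
                printf("last reference dropped, freeing queue\n");
                free(q);
        }
}

/* Completing a request may drop the reference the request itself held;
 * without an extra pin, the queue could be gone when this returns. */
static void end_request(struct queue *q) { queue_put(q); }

static void run_queue(struct queue *q)
{
        printf("running queue, usage = %d\n", atomic_load(&q->usage));
}

int main(void)
{
        struct queue *q = malloc(sizeof(*q));
        atomic_init(&q->usage, 1);  /* reference held by the in-flight request */

        /* The fix: take an extra reference so running the queue cannot race
         * with teardown triggered from end_request(). */
        queue_get(q);
        end_request(q);
        run_queue(q);   /* safe: we still hold a reference */
        queue_put(q);   /* may now free the queue */
        return 0;
}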
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
         TP_fast_assign(
                 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-                strlcpy(__entry->domain, domain, DOMAIN_LEN);
-                strlcpy(__entry->type, type, DOMAIN_LEN);
+                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+                strlcpy(__entry->type, type, sizeof(__entry->type));
                 __entry->percentile = percentile;
                 __entry->numerator = numerator;
                 __entry->denominator = denominator;
@@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust,
         TP_fast_assign(
                 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-                strlcpy(__entry->domain, domain, DOMAIN_LEN);
+                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                 __entry->depth = depth;
         ),
@@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled,
         TP_fast_assign(
                 __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-                strlcpy(__entry->domain, domain, DOMAIN_LEN);
+                strlcpy(__entry->domain, domain, sizeof(__entry->domain));
         ),
 
         TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
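The kyber change applies the standard rule of bounding a string copy by the destination buffer itself rather than by a constant that describes some other buffer: if the type array is shorter than DOMAIN_LEN, the old bound allowed a write past its end. A small sketch of the idea, using snprintf as a portable stand-in for strlcpy (the two buffer sizes below are assumptions for the demo, not the header's real values):

#include <stdio.h>

#define DOMAIN_LEN 16
#define TYPE_LEN 8  /* demo assumption: the type buffer is shorter than DOMAIN_LEN */

struct entry {
        char domain[DOMAIN_LEN];
        char type[TYPE_LEN];
};

int main(void)
{
        struct entry e;
        const char *type = "latency-total";  /* longer than TYPE_LEN - 1 */

        /* Wrong: bounding the copy by DOMAIN_LEN would allow up to 16 bytes
         * into an 8-byte field. Right: let sizeof() supply the destination's
         * own size, so the copy is truncated instead of overflowing. */
        snprintf(e.domain, sizeof(e.domain), "%s", "read");
        snprintf(e.type, sizeof(e.type), "%s", type);

        printf("domain=\"%s\" type=\"%s\" (type truncated to fit %zu bytes)\n",
               e.domain, e.type, sizeof(e.type));
        return 0;
}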