Commit 7c4a9459 authored by Linus Torvalds

Merge tag 'block-5.17-2022-02-04' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request
    - fix use-after-free in rdma and tcp controller reset (Sagi Grimberg)
    - fix the state check in nvmf_ctlr_matches_baseopts (Uday Shankar)

 - MD nowait null pointer fix (Song)

 - blk-integrity seed advance fix (Martin)

 - Fix a dio regression in this merge window (Ilya)

* tag 'block-5.17-2022-02-04' of git://git.kernel.dk/linux-block:
  block: bio-integrity: Advance seed correctly for larger interval sizes
  nvme-fabrics: fix state check in nvmf_ctlr_matches_baseopts()
  md: fix NULL pointer deref with nowait but no mddev->queue
  block: fix DIO handling regressions in blkdev_read_iter()
  nvme-rdma: fix possible use-after-free in transport error_recovery work
  nvme-tcp: fix possible use-after-free in transport error_recovery work
  nvme: fix a possible use-after-free in controller reset during load
parents 494a2c2b b13e0c71
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
-	bip->bip_iter.bi_sector += bytes_done >> 9;
+	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
 	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
 }
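The bug was a unit mismatch: bip->bip_iter.bi_sector carries the integrity seed in protection intervals, not in 512-byte sectors, so advancing it by bytes_done >> 9 overshoots whenever the interval is larger than one sector. For reference, a sketch of the conversion helper used by the fix (its definition lives in block/blk.h in this era; abridged here, so check the tree for the authoritative version):

	/* Convert a 512-byte sector count into protection intervals;
	 * bi->interval_exp is log2 of the protection interval size. */
	static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
							   unsigned int sectors)
	{
		return sectors >> (bi->interval_exp - 9);
	}

With 4096-byte intervals (interval_exp = 12), advancing a bio by 4096 bytes means bytes_done >> 9 = 8 sectors but 8 >> (12 - 9) = 1 interval, so the old code moved the seed eight intervals where it should have moved one.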
--- a/block/fops.c
+++ b/block/fops.c
@@ -566,34 +566,37 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct block_device *bdev = iocb->ki_filp->private_data;
 	loff_t size = bdev_nr_bytes(bdev);
-	size_t count = iov_iter_count(to);
 	loff_t pos = iocb->ki_pos;
 	size_t shorted = 0;
 	ssize_t ret = 0;
+	size_t count;
 
-	if (unlikely(pos + count > size)) {
+	if (unlikely(pos + iov_iter_count(to) > size)) {
 		if (pos >= size)
 			return 0;
 		size -= pos;
-		if (count > size) {
-			shorted = count - size;
-			iov_iter_truncate(to, size);
-		}
+		shorted = iov_iter_count(to) - size;
+		iov_iter_truncate(to, size);
 	}
 
+	count = iov_iter_count(to);
+	if (!count)
+		goto reexpand; /* skip atime */
+
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		struct address_space *mapping = iocb->ki_filp->f_mapping;
 
 		if (iocb->ki_flags & IOCB_NOWAIT) {
-			if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
-						iocb->ki_pos + count - 1))
-				return -EAGAIN;
+			if (filemap_range_needs_writeback(mapping, pos,
+							  pos + count - 1)) {
+				ret = -EAGAIN;
+				goto reexpand;
+			}
 		} else {
-			ret = filemap_write_and_wait_range(mapping,
-						iocb->ki_pos,
-						iocb->ki_pos + count - 1);
+			ret = filemap_write_and_wait_range(mapping, pos,
							   pos + count - 1);
 			if (ret < 0)
-				return ret;
+				goto reexpand;
 		}
 
 		file_accessed(iocb->ki_filp);
@@ -603,12 +606,14 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		iocb->ki_pos += ret;
 		count -= ret;
 	}
+	iov_iter_revert(to, count - iov_iter_count(to));
 	if (ret < 0 || !count)
-		return ret;
+		goto reexpand;
 	}
 
 	ret = filemap_read(iocb, to, ret);
 
+reexpand:
 	if (unlikely(shorted))
 		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
 	return ret;
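Two regressions are closed here: early returns taken after iov_iter_truncate() leaked a truncated iterator back to the caller, and a short direct read could fall through to buffered reading with the iterator out of step with where the buffered path would resume. The rewrite funnels every exit through the reexpand label and rewinds the iterator (iov_iter_revert()) before falling back to filemap_read(). A minimal sketch of the clamp/unclamp invariant, with a hypothetical do_read() standing in for the actual I/O:

	/* Sketch: clamp the iterator on the way in, undo the clamp on
	 * every way out, so the caller always gets back an iterator
	 * sized to what it originally supplied. */
	ssize_t read_clamped(struct kiocb *iocb, struct iov_iter *to, loff_t size)
	{
		size_t shorted = 0;
		ssize_t ret;

		if (iocb->ki_pos + iov_iter_count(to) > size) {
			shorted = iov_iter_count(to) - (size - iocb->ki_pos);
			iov_iter_truncate(to, size - iocb->ki_pos);
		}

		ret = do_read(iocb, to);	/* may consume part of 'to' */

		if (shorted)			/* undo the clamp on all paths */
			iov_iter_reexpand(to, iov_iter_count(to) + shorted);
		return ret;
	}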
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5869,10 +5869,6 @@ int md_run(struct mddev *mddev)
 		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
 	}
 
-	/* Set the NOWAIT flags if all underlying devices support it */
-	if (nowait)
-		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
-
 	if (!bioset_initialized(&mddev->bio_set)) {
 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (err)
@@ -6010,6 +6006,10 @@ int md_run(struct mddev *mddev)
 		else
 			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
 		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+
+		/* Set the NOWAIT flags if all underlying devices support it */
+		if (nowait)
+			blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
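mddev->queue can be NULL when md_run() is entered for an array that owns no request queue (the dm-raid target drives md personalities this way), so setting the flag unconditionally dereferenced a NULL pointer. The fix moves the update into the existing if (mddev->queue) block later in md_run(); the equivalent explicit guard, as a sketch, would be:

	/* Sketch: only touch queue flags when the mddev owns a queue. */
	if (mddev->queue && nowait)
		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);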
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4253,7 +4253,14 @@ static void nvme_async_event_work(struct work_struct *work)
 		container_of(work, struct nvme_ctrl, async_event_work);
 
 	nvme_aen_uevent(ctrl);
-	ctrl->ops->submit_async_event(ctrl);
+
+	/*
+	 * The transport drivers must guarantee AER submission here is safe by
+	 * flushing ctrl async_event_work after changing the controller state
+	 * from LIVE and before freeing the admin queue.
+	 */
+	if (ctrl->state == NVME_CTRL_LIVE)
+		ctrl->ops->submit_async_event(ctrl);
 }
 
 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
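The state check alone cannot close the use-after-free (the state could change immediately after being read); it works only in combination with the flush_work() calls the rdma and tcp hunks below add, as the new comment spells out. The resulting ordering contract, as a sketch:

	/*
	 * Sketch of the contract between a transport teardown path and
	 * nvme_async_event_work() (simplified):
	 *
	 *   teardown path                          async_event_work
	 *   -------------                          ----------------
	 *   change ctrl state away from LIVE
	 *   flush_work(&ctrl->async_event_work) -> any queued run completes
	 *                                          here and, seeing state
	 *                                          != LIVE, submits no AER
	 *   free the admin queue                   (nothing left to race)
	 */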
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -170,6 +170,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
 			struct nvmf_ctrl_options *opts)
 {
 	if (ctrl->state == NVME_CTRL_DELETING ||
+	    ctrl->state == NVME_CTRL_DELETING_NOIO ||
 	    ctrl->state == NVME_CTRL_DEAD ||
 	    strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
 	    strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
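NVME_CTRL_DELETING_NOIO is a second deletion state, used for controllers that must be torn down without issuing further I/O; a controller in it is just as gone as one in NVME_CTRL_DELETING, so the duplicate-connection check must skip it too. For context, the controller state set roughly as declared in drivers/nvme/host/nvme.h around this release (abridged sketch; the header is authoritative):

	enum nvme_ctrl_state {
		NVME_CTRL_NEW,
		NVME_CTRL_LIVE,
		NVME_CTRL_RESETTING,
		NVME_CTRL_CONNECTING,
		NVME_CTRL_DELETING,
		NVME_CTRL_DELETING_NOIO,	/* deleting, no further I/O */
		NVME_CTRL_DEAD,
	};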
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 			struct nvme_rdma_ctrl, err_work);
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
+	flush_work(&ctrl->ctrl.async_event_work);
 	nvme_rdma_teardown_io_queues(ctrl, false);
 	nvme_start_queues(&ctrl->ctrl);
 	nvme_rdma_teardown_admin_queue(ctrl, false);
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2096,6 +2096,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
 
 	nvme_stop_keep_alive(ctrl);
+	flush_work(&ctrl->async_event_work);
 	nvme_tcp_teardown_io_queues(ctrl, false);
 	/* unquiesce to fail fast pending requests */
 	nvme_start_queues(ctrl);
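Both transports place the flush at the same point: by the time these error-recovery workers run, the controller state has already left LIVE, so flushing async_event_work guarantees any concurrently queued run has either observed the new state or finished before the admin queue is torn down. A condensed view of the sequence both workers now follow (names taken from the hunks above):

	/* Condensed sequence of the fixed error-recovery workers: */
	nvme_stop_keep_alive(ctrl);		/* stop generating admin traffic */
	flush_work(&ctrl->async_event_work);	/* drain in-flight AER submission */
	/* teardown I/O queues, unquiesce to fail fast pending requests, */
	/* then tear down the admin queue -- now safe from a late AER.   */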