Commit 7b8731d9 authored by Linus Torvalds

Merge tag 'block-5.9-2020-09-11' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Fix a regression in bdev partition locking (Christoph)

 - NVMe pull request from Christoph:
      - cancel async events before freeing them (David Milburn)
      - revert a broken race fix (James Smart)
      - fix command processing during resets (Sagi Grimberg)

 - Fix a kyber crash with requeued flushes (Omar)

 - Fix __bio_try_merge_page() same_page error for no merging (Ritesh)

* tag 'block-5.9-2020-09-11' of git://git.kernel.dk/linux-block:
  block: Set same_page to false in __bio_try_merge_page if ret is false
  nvme-fabrics: allow to queue requests for live queues
  block: only call sched requeue_request() for scheduled requests
  nvme-tcp: cancel async events before freeing event struct
  nvme-rdma: cancel async events before freeing event struct
  nvme-fc: cancel async events before freeing event struct
  nvme: Revert: Fix controller creation races with teardown flow
  block: restore a specific error code in bdev_del_partition
parents e8878ab8 fd04358e
@@ -5895,18 +5895,6 @@ static void bfq_finish_requeue_request(struct request *rq)
 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
 	struct bfq_data *bfqd;
 
-	/*
-	 * Requeue and finish hooks are invoked in blk-mq without
-	 * checking whether the involved request is actually still
-	 * referenced in the scheduler. To handle this fact, the
-	 * following two checks make this function exit in case of
-	 * spurious invocations, for which there is nothing to do.
-	 *
-	 * First, check whether rq has nothing to do with an elevator.
-	 */
-	if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
-		return;
-
 	/*
 	 * rq either is not associated with any icq, or is an already
 	 * requeued request that has not (yet) been re-inserted into
@@ -879,8 +879,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
-			if (bio->bi_iter.bi_size > UINT_MAX - len)
+			if (bio->bi_iter.bi_size > UINT_MAX - len) {
+				*same_page = false;
 				return false;
+			}
 			bv->bv_len += len;
 			bio->bi_iter.bi_size += len;
 			return true;
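
For context, here is a hedged sketch of the caller pattern this protects, modeled on the bio_iov_iter_get_pages() style of caller (simplified, not copied from upstream): callers consult *same_page after a successful merge to decide whether to drop the extra page reference, so a stale true left behind by page_is_mergeable() on the overflow failure path could mislead any caller that looks at it after a failed merge.

```c
/*
 * Illustrative caller pattern (simplified sketch): on a successful
 * merge into an existing bvec of the same page, the extra page
 * reference is dropped.  The fix above guarantees *same_page is
 * false whenever the merge fails, including the bi_size overflow
 * path, so failure-path consumers of same_page see a sane value.
 */
bool same_page = false;

if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
	if (same_page)	/* merged into an existing bvec: drop extra ref */
		put_page(page);
} else {
	if (WARN_ON_ONCE(bio_full(bio, len)))
		return -EINVAL;
	__bio_add_page(bio, page, len, offset);
}
```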
@@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 
-	if (e && e->type->ops.requeue_request)
+	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
 		e->type->ops.requeue_request(rq);
 }
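
This one-line check is what fixes the kyber crash with requeued flushes, and it is why the open-coded RQF_ELVPRIV check could be dropped from bfq above: flush requests can be requeued without ever having been inserted through the elevator, so they carry no per-request scheduler data. A minimal sketch of why that matters, using a hypothetical scheduler (placeholder names, not kyber's actual code):

```c
/*
 * Hypothetical scheduler sketch: a requeue hook typically assumes
 * per-request private data that is only allocated when the request
 * was inserted through the elevator (RQF_ELVPRIV set).
 */
struct my_sched_rq_data {
	struct list_head list;
	u64 insert_ns;
};

static void my_sched_requeue_request(struct request *rq)
{
	/*
	 * rq->elv.priv[0] is only valid for requests the elevator
	 * owns; for a requeued flush that bypassed the scheduler it
	 * is unset or stale, and dereferencing it here would crash.
	 * The central RQF_ELVPRIV check keeps such requests out.
	 */
	struct my_sched_rq_data *data = rq->elv.priv[0];

	list_del_init(&data->list);
}
```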
@@ -537,7 +537,7 @@ int bdev_del_partition(struct block_device *bdev, int partno)
 	bdevp = bdget_disk(bdev->bd_disk, partno);
 	if (!bdevp)
-		return -ENOMEM;
+		return -ENXIO;
 
 	mutex_lock(&bdevp->bd_mutex);
 	mutex_lock_nested(&bdev->bd_mutex, 1);
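
bdev_del_partition() is reached from the BLKPG ioctl, so the restored error code is directly visible to userspace. A hedged sketch of how the difference surfaces ("/dev/sdX" and partition number 42 are placeholders): deleting a nonexistent partition should again fail with ENXIO rather than the misleading ENOMEM.

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkpg.h>

int main(void)
{
	struct blkpg_partition part = { .pno = 42 };	/* nonexistent partno */
	struct blkpg_ioctl_arg arg = {
		.op = BLKPG_DEL_PARTITION,
		.datalen = sizeof(part),
		.data = &part,
	};
	int fd = open("/dev/sdX", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKPG, &arg) < 0)
		/* expect ENXIO ("No such device or address"), not ENOMEM */
		printf("BLKPG_DEL_PARTITION: %s\n", strerror(errno));
	close(fd);
	return 0;
}
```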
@@ -3525,10 +3525,6 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
 {
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
-	/* Can't delete non-created controllers */
-	if (!ctrl->created)
-		return -EBUSY;
-
 	if (device_remove_file_self(dev, attr))
 		nvme_delete_ctrl_sync(ctrl);
 	return count;
@@ -4403,7 +4399,6 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 		nvme_queue_scan(ctrl);
 		nvme_start_queues(ctrl);
 	}
-	ctrl->created = true;
 }
 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 	struct nvme_request *req = nvme_req(rq);
 
 	/*
-	 * If we are in some state of setup or teardown only allow
-	 * internally generated commands.
+	 * currently we have a problem sending passthru commands
+	 * on the admin_q if the controller is not LIVE because we can't
+	 * make sure that they are going out after the admin connect,
+	 * controller enable and/or other commands in the initialization
+	 * sequence. until the controller will be LIVE, fail with
+	 * BLK_STS_RESOURCE so that they will be rescheduled.
 	 */
-	if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
+	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
 		return false;
 
 	/*
@@ -577,7 +581,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 	 */
 	switch (ctrl->state) {
 	case NVME_CTRL_CONNECTING:
-		if (nvme_is_fabrics(req->cmd) &&
+		if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
 		    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
 			return true;
 		break;
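
For orientation, roughly how the fabrics transports consume this check in their ->queue_rq() handlers (a simplified sketch in the style of the nvme-tcp/rdma drivers of this era; my_queue, my_fabrics_queue_rq, and MY_Q_LIVE are placeholder names): requests that are not yet allowed are failed with nvmf_fail_nonready_command(), which returns BLK_STS_RESOURCE for requeueable requests, so queued commands on a non-LIVE controller are retried once it goes live rather than rejected outright.

```c
/*
 * Sketch of the common fabrics ->queue_rq() prologue.  The struct
 * my_queue type is a stand-in for the transport's per-queue state.
 */
static blk_status_t my_fabrics_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
{
	struct my_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	bool queue_ready = test_bit(MY_Q_LIVE, &queue->flags);

	/* __nvmf_check_ready() above decides what may pass pre-LIVE */
	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	/* ... normal command setup and submission ... */
	return BLK_STS_OK;
}
```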
@@ -2160,6 +2160,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
 	struct nvme_fc_fcp_op *aen_op;
 	int i;
 
+	cancel_work_sync(&ctrl->ctrl.async_event_work);
 	aen_op = ctrl->aen_ops;
 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
 		__nvme_fc_exit_request(ctrl, aen_op);
@@ -307,7 +307,6 @@ struct nvme_ctrl {
 	struct nvme_command ka_cmd;
 	struct work_struct fw_act_work;
 	unsigned long events;
-	bool created;
 
 #ifdef CONFIG_NVME_MULTIPATH
 	/* asymmetric namespace access: */
@@ -835,6 +835,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 	}
 	if (ctrl->async_event_sqe.data) {
+		cancel_work_sync(&ctrl->ctrl.async_event_work);
 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
 				sizeof(struct nvme_command), DMA_TO_DEVICE);
 		ctrl->async_event_sqe.data = NULL;
@@ -1596,6 +1596,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
 {
 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+		cancel_work_sync(&ctrl->async_event_work);
 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
 	}
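
All three transport patches (fc, rdma, tcp) close the same window: async_event_work may still be pending or running while teardown frees the request/PDU it uses, so the work must be flushed first. The generic shape of the fix, as a standalone hedged sketch (hypothetical driver code, not the nvme sources):

```c
/*
 * A work item that touches an object must be cancelled with
 * cancel_work_sync() before that object is freed; otherwise a
 * concurrently pending or running work callback dereferences
 * freed memory.
 */
struct my_ctrl {
	struct work_struct async_event_work;
	void *async_req;	/* consumed by the work callback */
};

static void my_teardown(struct my_ctrl *ctrl)
{
	/*
	 * Waits for a pending/running async_event_work to finish.
	 * Without this, the callback may still dereference
	 * ctrl->async_req after the kfree() below.
	 */
	cancel_work_sync(&ctrl->async_event_work);

	kfree(ctrl->async_req);
	ctrl->async_req = NULL;
}
```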