Commit 0821de28 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus-20191101' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Two small nvme fixes, one is a fabrics connection fix, the other one
   a cleanup made possible by that fix (Anton, via Keith)

 - Fix requeue handling in um ubd (Anton)

 - Fix spin_lock_irq() nesting in blk-iocost (Dan)

 - Three small io_uring fixes:
     - Install io_uring fd after done with ctx (me)
     - Clear ->result before every poll issue (me)
     - Fix leak of shadow request on error (Pavel)

* tag 'for-linus-20191101' of git://git.kernel.dk/linux-block:
  iocost: don't nest spin_lock_irq in ioc_weight_write()
  io_uring: ensure we clear io_kiocb->result before each issue
  um-ubd: Entrust re-queue to the upper layers
  nvme-multipath: remove unused groups_only mode in ana log
  nvme-multipath: fix possible io hang after ctrl reconnect
  io_uring: don't touch ctx in setup after ring fd install
  io_uring: Fix leaked shadow_req
parents e5897c7d 41591a51
...@@ -1403,8 +1403,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -1403,8 +1403,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_unlock_irq(&ubd_dev->lock); spin_unlock_irq(&ubd_dev->lock);
if (ret < 0) if (ret < 0) {
blk_mq_requeue_request(req, true); if (ret == -ENOMEM)
res = BLK_STS_RESOURCE;
else
res = BLK_STS_DEV_RESOURCE;
}
return res; return res;
} }
......
...@@ -2110,10 +2110,10 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, ...@@ -2110,10 +2110,10 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
goto einval; goto einval;
} }
spin_lock_irq(&iocg->ioc->lock); spin_lock(&iocg->ioc->lock);
iocg->cfg_weight = v; iocg->cfg_weight = v;
weight_updated(iocg); weight_updated(iocg);
spin_unlock_irq(&iocg->ioc->lock); spin_unlock(&iocg->ioc->lock);
blkg_conf_finish(&ctx); blkg_conf_finish(&ctx);
return nbytes; return nbytes;
......
...@@ -522,14 +522,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, ...@@ -522,14 +522,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
return 0; return 0;
} }
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only) static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{ {
u32 nr_change_groups = 0; u32 nr_change_groups = 0;
int error; int error;
mutex_lock(&ctrl->ana_lock); mutex_lock(&ctrl->ana_lock);
error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
groups_only ? NVME_ANA_LOG_RGO : 0,
ctrl->ana_log_buf, ctrl->ana_log_size, 0); ctrl->ana_log_buf, ctrl->ana_log_size, 0);
if (error) { if (error) {
dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error); dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
...@@ -565,7 +564,7 @@ static void nvme_ana_work(struct work_struct *work) ...@@ -565,7 +564,7 @@ static void nvme_ana_work(struct work_struct *work)
{ {
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
nvme_read_ana_log(ctrl, false); nvme_read_ana_log(ctrl);
} }
static void nvme_anatt_timeout(struct timer_list *t) static void nvme_anatt_timeout(struct timer_list *t)
...@@ -715,7 +714,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) ...@@ -715,7 +714,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
goto out; goto out;
} }
error = nvme_read_ana_log(ctrl, true); error = nvme_read_ana_log(ctrl);
if (error) if (error)
goto out_free_ana_log_buf; goto out_free_ana_log_buf;
return 0; return 0;
......
...@@ -1124,6 +1124,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1124,6 +1124,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
kiocb->ki_flags |= IOCB_HIPRI; kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll; kiocb->ki_complete = io_complete_rw_iopoll;
req->result = 0;
} else { } else {
if (kiocb->ki_flags & IOCB_HIPRI) if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL; return -EINVAL;
...@@ -2413,6 +2414,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2413,6 +2414,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (ret) { if (ret) {
if (ret != -EIOCBQUEUED) { if (ret != -EIOCBQUEUED) {
io_free_req(req); io_free_req(req);
__io_free_req(shadow);
io_cqring_add_event(ctx, s->sqe->user_data, ret); io_cqring_add_event(ctx, s->sqe->user_data, ret);
return 0; return 0;
} }
...@@ -3828,10 +3830,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) ...@@ -3828,10 +3830,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
if (ret) if (ret)
goto err; goto err;
ret = io_uring_get_fd(ctx);
if (ret < 0)
goto err;
memset(&p->sq_off, 0, sizeof(p->sq_off)); memset(&p->sq_off, 0, sizeof(p->sq_off));
p->sq_off.head = offsetof(struct io_rings, sq.head); p->sq_off.head = offsetof(struct io_rings, sq.head);
p->sq_off.tail = offsetof(struct io_rings, sq.tail); p->sq_off.tail = offsetof(struct io_rings, sq.tail);
...@@ -3849,6 +3847,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) ...@@ -3849,6 +3847,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
p->cq_off.cqes = offsetof(struct io_rings, cqes); p->cq_off.cqes = offsetof(struct io_rings, cqes);
/*
* Install ring fd as the very last thing, so we don't risk someone
* having closed it before we finish setup
*/
ret = io_uring_get_fd(ctx);
if (ret < 0)
goto err;
p->features = IORING_FEAT_SINGLE_MMAP; p->features = IORING_FEAT_SINGLE_MMAP;
return ret; return ret;
err: err:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment