Commit b9bd6806 authored by Linus Torvalds

Merge tag 'for-linus-20190823' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Here's a set of fixes that should go into this release. This contains:

   - Three minor fixes for NVMe.

   - Three minor tweaks for the io_uring polling logic.

   - Officially mark Song as the MD maintainer, after he's been filling
     that role successfully for the last 6 months or so"

* tag 'for-linus-20190823' of git://git.kernel.dk/linux-block:
  io_uring: add need_resched() check in inner poll loop
  md: update MAINTAINERS info
  io_uring: don't enter poll loop if we have CQEs pending
  nvme: Add quirk for LiteON CL1 devices running FW 22301111
  nvme: Fix cntlid validation when not using NVMEoF
  nvme-multipath: fix possible I/O hang when paths are updated
  io_uring: fix potential hang with polled IO
parents dd469a45 08f5439f
@@ -14883,9 +14883,9 @@ F: include/linux/arm_sdei.h
F: include/uapi/linux/arm_sdei.h
SOFTWARE RAID (Multiple Disks) SUPPORT
M: Shaohua Li <shli@kernel.org>
M: Song Liu <song@kernel.org>
L: linux-raid@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
S: Supported
F: drivers/md/Makefile
F: drivers/md/Kconfig
@@ -2257,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x1179,
.mn = "THNSF5256GPUK TOSHIBA",
.quirks = NVME_QUIRK_NO_APST,
},
{
/*
* This LiteON CL1-3D*-Q11 firmware version has a race
* condition associated with actions related to suspend to idle.
* LiteON has resolved the problem in future firmware.
*/
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
@@ -2597,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
if (!(ctrl->ops->flags & NVME_F_FABRICS))
ctrl->cntlid = le16_to_cpu(id->cntlid);
if (!ctrl->identified) {
int i;
@@ -2697,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
} else {
ctrl->cntlid = le16_to_cpu(id->cntlid);
ctrl->hmpre = le32_to_cpu(id->hmpre);
ctrl->hmmin = le32_to_cpu(id->hmmin);
ctrl->hmminds = le32_to_cpu(id->hmminds);
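The core_quirks[] hunk above adds a (vendor ID, firmware revision) entry for the affected LiteON drives, which is matched against the controller at identify time. Below is a rough standalone sketch of that table-lookup idea; the struct fields, helper name, and prefix-matching rule are invented for illustration and are not the kernel's actual nvme_core_quirk_entry handling.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QUIRK_SIMPLE_SUSPEND (1UL << 10)   /* stand-in for NVME_QUIRK_SIMPLE_SUSPEND */

struct quirk_entry {
        uint16_t vid;          /* PCI vendor ID to match, 0 means "any" */
        const char *fr;        /* firmware revision to match, NULL means "any" */
        unsigned long quirks;  /* quirk flags applied on a match */
};

static const struct quirk_entry quirk_table[] = {
        /* LiteON CL1 devices on firmware 22301111 need the simple suspend path */
        { .vid = 0x14a4, .fr = "22301111", .quirks = QUIRK_SIMPLE_SUSPEND },
};

static unsigned long lookup_quirks(uint16_t vid, const char *fr)
{
        unsigned long quirks = 0;

        for (size_t i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++) {
                const struct quirk_entry *q = &quirk_table[i];

                if (q->vid && q->vid != vid)
                        continue;
                if (q->fr && strncmp(q->fr, fr, strlen(q->fr)) != 0)
                        continue;
                quirks |= q->quirks;
        }
        return quirks;
}

int main(void)
{
        printf("%#lx\n", lookup_quirks(0x14a4, "22301111"));  /* matches: 0x400 */
        printf("%#lx\n", lookup_quirks(0x14a4, "22301222"));  /* no match: 0 */
        return 0;
}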
@@ -428,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
srcu_read_unlock(&head->srcu, srcu_idx);
}
synchronize_srcu(&ns->head->srcu);
kblockd_schedule_work(&ns->head->requeue_work);
}
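The one-line nvme_mpath_set_live() fix above kicks the head's requeue work after a path becomes live, so I/O parked on the requeue list is resubmitted instead of hanging. A toy pthread sketch of that "update the state, then wake the worker" pattern follows; the names and the counter are invented for illustration and this is not the driver's code (build with -lpthread).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int parked_requests = 3;    /* stand-in for bios on the requeue list */
static bool path_live = false;     /* set when a usable path appears */
static bool worker_kicked = false; /* analogue of kblockd_schedule_work() */

static void *requeue_worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (parked_requests > 0) {
                while (!worker_kicked)
                        pthread_cond_wait(&kick, &lock); /* idle until kicked */
                worker_kicked = false;
                if (path_live) {
                        printf("resubmitting %d parked requests\n", parked_requests);
                        parked_requests = 0;
                }
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, requeue_worker, NULL);

        pthread_mutex_lock(&lock);
        path_live = true;     /* the path update alone is not enough...      */
        worker_kicked = true; /* ...the worker must also be kicked, or the
                               * parked requests sit on the list forever     */
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);

        pthread_join(worker, NULL);
        return 0;
}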
@@ -92,6 +92,11 @@ enum nvme_quirks {
* Broken Write Zeroes.
*/
NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
/*
* Force simple suspend/resume path.
*/
NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
};
/*
@@ -2876,7 +2876,8 @@ static int nvme_suspend(struct device *dev)
* state (which may not be possible if the link is up).
*/
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev)) {
!pcie_aspm_enabled(pdev) ||
(ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
nvme_dev_disable(ndev, true);
return 0;
}
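With the new quirk bit in place, the nvme_suspend() hunk above falls back to fully disabling the controller whenever any of four conditions holds. A hypothetical standalone predicate restating that decision is sketched below; the struct and helper names are made up for clarity and do not exist in the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUIRK_SIMPLE_SUSPEND (1UL << 10)   /* stand-in for NVME_QUIRK_SIMPLE_SUSPEND */

struct ctrl_state {
        unsigned long quirks;        /* controller quirk flags */
        uint8_t npss;                /* number of NVMe power states supported */
        bool suspend_via_firmware;   /* platform suspend handled by firmware */
        bool aspm_enabled;           /* PCIe ASPM active on the link */
};

/* True when the driver should simply disable the device on suspend rather
 * than keep it alive and rely on NVMe power-state transitions. */
static bool use_simple_suspend(const struct ctrl_state *c)
{
        return c->suspend_via_firmware ||
               !c->npss ||
               !c->aspm_enabled ||
               (c->quirks & QUIRK_SIMPLE_SUSPEND);
}

int main(void)
{
        struct ctrl_state quirky = {
                .quirks = QUIRK_SIMPLE_SUSPEND,
                .npss = 4,
                .suspend_via_firmware = false,
                .aspm_enabled = true,
        };

        /* The quirk alone is enough to force the simple path. */
        printf("simple suspend: %s\n", use_simple_suspend(&quirky) ? "yes" : "no");
        return 0;
}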
@@ -679,6 +679,13 @@ static void io_put_req(struct io_kiocb *req)
io_free_req(req);
}
static unsigned io_cqring_events(struct io_cq_ring *ring)
{
/* See comment at the top of this file */
smp_rmb();
return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
}
/*
* Find and free completed poll iocbs
*/
@@ -771,7 +778,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
long min)
{
while (!list_empty(&ctx->poll_list)) {
while (!list_empty(&ctx->poll_list) && !need_resched()) {
int ret;
ret = io_do_iopoll(ctx, nr_events, min);
@@ -798,6 +805,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
unsigned int nr_events = 0;
io_iopoll_getevents(ctx, &nr_events, 1);
/*
* Ensure we allow local-to-the-cpu processing to take place,
* in this case we need to ensure that we reap all events.
*/
cond_resched();
}
mutex_unlock(&ctx->uring_lock);
}
@@ -805,11 +818,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
long min)
{
int ret = 0;
int iters, ret = 0;
/*
* We disallow the app entering submit/complete with polling, but we
* still need to lock the ring to prevent racing with polled issue
* that got punted to a workqueue.
*/
mutex_lock(&ctx->uring_lock);
iters = 0;
do {
int tmin = 0;
/*
* Don't enter poll loop if we already have events pending.
* If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error).
*/
if (io_cqring_events(ctx->cq_ring))
break;
/*
* If a submit got punted to a workqueue, we can have the
* application entering polling for a command before it gets
* issued. That app will hold the uring_lock for the duration
* of the poll right here, so we need to take a breather every
* now and then to ensure that the issue has a chance to add
* the poll to the issued list. Otherwise we can spin here
* forever, while the workqueue is stuck trying to acquire the
* very same mutex.
*/
if (!(++iters & 7)) {
mutex_unlock(&ctx->uring_lock);
mutex_lock(&ctx->uring_lock);
}
if (*nr_events < min)
tmin = min - *nr_events;
@@ -819,6 +863,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
ret = 0;
} while (min && !*nr_events && !need_resched());
mutex_unlock(&ctx->uring_lock);
return ret;
}
@@ -2280,15 +2325,7 @@ static int io_sq_thread(void *data)
unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) {
/*
* We disallow the app entering submit/complete
* with polling, but we still need to lock the
* ring to prevent racing with polled issue
* that got punted to a workqueue.
*/
mutex_lock(&ctx->uring_lock);
io_iopoll_check(ctx, &nr_events, 0);
mutex_unlock(&ctx->uring_lock);
} else {
/*
* Normal IO, just pretend everything completed.
@@ -2433,13 +2470,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
return submit;
}
static unsigned io_cqring_events(struct io_cq_ring *ring)
{
/* See comment at the top of this file */
smp_rmb();
return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
}
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -3190,9 +3220,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
min_complete = min(min_complete, ctx->cq_entries);
if (ctx->flags & IORING_SETUP_IOPOLL) {
mutex_lock(&ctx->uring_lock);
ret = io_iopoll_check(ctx, &nr_events, min_complete);
mutex_unlock(&ctx->uring_lock);
} else {
ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
}
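The io_uring hunks above all concern rings created with IORING_SETUP_IOPOLL, where reaping completions means spinning in the kernel's poll loop rather than sleeping on an interrupt. For context, a minimal userspace consumer that exercises that path is sketched below, assuming liburing is installed and the target file supports O_DIRECT; error handling is trimmed for brevity.

#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(int argc, char **argv)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct iovec iov;
        int fd;

        if (argc < 2)
                return 1;

        /* Polled I/O requires O_DIRECT file descriptors. */
        fd = open(argv[1], O_RDONLY | O_DIRECT);
        if (fd < 0)
                return 1;

        /* 4 KiB aligned buffer for the O_DIRECT read. */
        if (posix_memalign(&iov.iov_base, 4096, 4096))
                return 1;
        iov.iov_len = 4096;

        if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_readv(sqe, fd, &iov, 1, 0);
        io_uring_submit(&ring);

        /* On an IOPOLL ring this waits by polling in the kernel, i.e. it
         * ends up in the io_iopoll_check() path patched above. */
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read returned %d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
}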