Commit eefdf30f authored by Jens Axboe

io_uring: fix IOPOLL -EAGAIN retries

This normally isn't hit, as polling is mostly done on NVMe with deep
queue depths. But if we do run into request starvation, we need to
ensure that retries are properly serialized.
Reported-by: Andres Freund <andres@anarazel.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 56450c20
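
The path in question is only reachable on rings created with IORING_SETUP_IOPOLL, i.e. completion-polled I/O on O_DIRECT files. A minimal user-space sketch of such a setup using liburing (hypothetical reproducer, not part of this commit; the file path and buffer size are placeholders, and it assumes a liburing with io_uring_prep_read()):

/* Hypothetical sketch: a polled (IORING_SETUP_IOPOLL) read on an
 * O_DIRECT file -- the configuration in which a kernel-internal
 * -EAGAIN must be retried via io-wq. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	/* IOPOLL requires O_DIRECT (and a driver that supports polling) */
	fd = open("/tmp/testfile", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* with IOPOLL there are no interrupt-driven completions; waiting
	 * drives the kernel's completion polling via io_uring_enter() */
	if (io_uring_wait_cqe(&ring, &cqe))
		return 1;
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

Under request starvation the kernel-internal submission can return -EAGAIN, and that retry is what the hunks below serialize.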
@@ -1150,7 +1150,7 @@ static void io_prep_async_work(struct io_kiocb *req)
 	io_req_init_async(req);
 
 	if (req->flags & REQ_F_ISREG) {
-		if (def->hash_reg_file)
+		if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
 			io_wq_hash_work(&req->work, file_inode(req->file));
 	} else {
 		if (def->unbound_nonreg_file)
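
The hunk above widens the io-wq hashing rule: previously only opcodes marked hash_reg_file were hashed, now any regular-file request on an IOPOLL ring is hashed by its inode, so io-wq runs punted retries against the same file one at a time instead of in parallel. For reference, io_wq_hash_work() around this kernel version amounts to the following (a sketch from memory of fs/io-wq.h, shown for context only):

static inline void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	/* hash the inode pointer; work items sharing a hash run serially */
	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}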
@@ -3132,6 +3132,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iov_count = iov_iter_count(iter);
 	io_size = ret;
 	req->result = io_size;
 	ret = 0;
@@ -3144,7 +3145,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
 
-	iov_count = iov_iter_count(iter);
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
 	if (unlikely(ret))
 		goto out_free;
@@ -3157,7 +3157,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		ret = 0;
 		goto out_free;
 	} else if (ret == -EAGAIN) {
-		if (!force_nonblock)
+		/* IOPOLL retry should happen for io-wq threads */
+		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto done;
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
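
The three io_read() hunks work together: iov_count is now captured immediately after io_import_iovec(), so it is initialized on every path that can reach the iov_iter_revert() above, including the new IOPOLL retry path, and the revert has the pre-call byte count it needs. Reduced to a standalone sketch (hypothetical snippet, not the kernel code verbatim):

	/* capture the iterator size before issuing the read, so a partial
	 * consumption on -EAGAIN can be rewound before the retry */
	size_t count = iov_iter_count(iter);
	ret = call_read_iter(file, kiocb, iter);
	if (ret == -EAGAIN)
		/* some ->read_iter() paths consume bytes even on error */
		iov_iter_revert(iter, count - iov_iter_count(iter));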
@@ -3251,6 +3252,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iov_count = iov_iter_count(iter);
 	io_size = ret;
 	req->result = io_size;
@@ -3267,7 +3269,6 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	    (req->flags & REQ_F_ISREG))
 		goto copy_iov;
 
-	iov_count = iov_iter_count(iter);
 	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
 	if (unlikely(ret))
 		goto out_free;
@@ -3301,11 +3302,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 		ret2 = -EAGAIN;
 	if (!force_nonblock || ret2 != -EAGAIN) {
+		/* IOPOLL retry should happen for io-wq threads */
+		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
+			goto copy_iov;
 		kiocb_done(kiocb, ret2, cs);
 	} else {
-copy_iov:
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
+copy_iov:
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
 		if (!ret)
 			return -EAGAIN;