Commit 56015910 authored by Linus Torvalds

Merge tag 'io_uring-5.13-2021-05-14' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Just a few minor fixes/changes:

   - Fix issue with double free race for linked timeout completions

   - Fix reference issue with timeouts

   - Remove last few places that make SQPOLL special, since it's just an
     io thread now.

   - Bump maximum allowed registered buffers, as we don't allocate as
     much anymore"
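
The last bullet raises the registered-buffer cap from UIO_MAXIOV (1024) to IORING_MAX_REG_BUFFERS (1U << 14, i.e. 16384); see the final hunk below. A minimal userspace sketch of what the new bound permits, assuming liburing is available (the function name, buffer count, and 4 KiB size are illustrative only, not part of the patch):

#include <errno.h>
#include <stdlib.h>
#include <liburing.h>

/* 2048 iovecs: over the old UIO_MAXIOV (1024) cap, but well under the
 * new IORING_MAX_REG_BUFFERS (16384) bound. */
#define NR_BUFS 2048

static int register_many_bufs(struct io_uring *ring)
{
	static struct iovec iovs[NR_BUFS];
	int i;

	for (i = 0; i < NR_BUFS; i++) {
		iovs[i].iov_base = malloc(4096);
		if (!iovs[i].iov_base)
			return -ENOMEM;
		iovs[i].iov_len = 4096;
	}
	/* Pre-5.13 kernels fail this with -EINVAL for nr > 1024; it can
	 * still fail for other reasons (e.g. RLIMIT_MEMLOCK), so check
	 * the return value. */
	return io_uring_register_buffers(ring, iovs, NR_BUFS);
}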

* tag 'io_uring-5.13-2021-05-14' of git://git.kernel.dk/linux-block:
  io_uring: increase max number of reg buffers
  io_uring: further remove sqpoll limits on opcodes
  io_uring: fix ltout double free on completion race
  io_uring: fix link timeout refs
parents 41f035c0 489809e2
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -100,6 +100,8 @@
 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
 				 IORING_REGISTER_LAST + IORING_OP_LAST)
 
+#define IORING_MAX_REG_BUFFERS	(1U << 14)
+
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
 				IOSQE_BUFFER_SELECT)
@@ -4035,7 +4037,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 
 	req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -4150,7 +4152,7 @@ static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
@@ -5827,8 +5829,6 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 static int io_rsrc_update_prep(struct io_kiocb *req,
 			       const struct io_uring_sqe *sqe)
 {
-	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
-		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->rw_flags)
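
With the SQPOLL check above removed, io_rsrc_update_prep() no longer rejects file-update requests on SQPOLL rings, in line with the epoll_ctl and statx hunks above. A hedged sketch of issuing IORING_OP_FILES_UPDATE on an SQPOLL ring, assuming liburing (the helper name, queue depth, and idle time are arbitrary choices):

#include <liburing.h>

static int sqpoll_files_update(int *fds, unsigned nr)
{
	struct io_uring_params p = {
		.flags = IORING_SETUP_SQPOLL,
		.sq_thread_idle = 2000,	/* ms before the SQ thread idles */
	};
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0)
		return ret;

	sqe = io_uring_get_sqe(&ring);
	/* Rejected with -EINVAL on SQPOLL rings before this change. */
	io_uring_prep_files_update(sqe, fds, nr, 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		ret = cqe->res;
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return ret;
}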
@@ -6354,19 +6354,20 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	/*
 	 * We don't expect the list to be empty, that will only happen if we
 	 * race with the completion of the linked work.
 	 */
-	if (prev && req_ref_inc_not_zero(prev))
+	if (prev) {
 		io_remove_next_linked(prev);
-	else
-		prev = NULL;
+		if (!req_ref_inc_not_zero(prev))
+			prev = NULL;
+	}
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	if (prev) {
 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
 		io_put_req_deferred(prev, 1);
-		io_put_req_deferred(req, 1);
 	} else {
 		io_req_complete_post(req, -ETIME, 0);
 	}
+	io_put_req_deferred(req, 1);
 	return HRTIMER_NORESTART;
 }
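
Both timeout fixes sit on the path exercised by a linked timeout, where completion of the parent request can race with the timer callback above. For reference, a sketch of how userspace arms that path, assuming liburing (read_with_timeout is a made-up name):

#include <liburing.h>

static int read_with_timeout(struct io_uring *ring, int fd,
			     void *buf, unsigned len)
{
	struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* next SQE is linked to this one */

	/* If the read completes just as the timer fires, the callback
	 * above must take its own reference before cancelling; the old
	 * code could put the timeout request twice on that race. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	return io_uring_submit(ring);
}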
@@ -8390,7 +8391,7 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 
 	if (ctx->user_bufs)
 		return -EBUSY;
-	if (!nr_args || nr_args > UIO_MAXIOV)
+	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
 		return -EINVAL;
 	ret = io_rsrc_node_switch_start(ctx);
 	if (ret)