Commit a66e4cbf authored by Linus Torvalds

Merge tag 'io_uring-6.1-2022-11-18' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:
 "This is mostly fixing issues around the poll rework, but also two
  tweaks for the multishot handling for accept and receive.

  All stable material"

* tag 'io_uring-6.1-2022-11-18' of git://git.kernel.dk/linux:
  io_uring: disallow self-propelled ring polling
  io_uring: fix multishot recv request leaks
  io_uring: fix multishot accept request leaks
  io_uring: fix tw losing poll events
  io_uring: update res mask in io_poll_check_events
parents 23a60a03 7fdbc5f0
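
For context on the accept and receive tweaks, a multishot accept request is armed once from userspace and then posts one CQE per accepted connection for as long as the kernel keeps IORING_CQE_F_MORE set in the completion flags. A minimal liburing sketch of that flow (illustrative only, not part of this merge; the queue depth and listen_fd are placeholder assumptions):

#include <liburing.h>
#include <stdio.h>

/* Arm one multishot accept SQE and drain its CQEs; the request stays
 * armed for as long as each CQE carries IORING_CQE_F_MORE. */
static int accept_loop(int listen_fd)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret = io_uring_queue_init(8, &ring, 0);

        if (ret < 0)
                return ret;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
        io_uring_submit(&ring);

        for (;;) {
                ret = io_uring_wait_cqe(&ring, &cqe);
                if (ret < 0)
                        break;
                if (cqe->res >= 0)
                        printf("accepted fd %d\n", cqe->res);
                /* no MORE flag: the multishot request has terminated and
                 * would have to be re-armed with a fresh SQE */
                if (!(cqe->flags & IORING_CQE_F_MORE)) {
                        io_uring_cqe_seen(&ring, cqe);
                        break;
                }
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return ret;
}

The request leak fixes in this pull concern the kernel side of exactly this terminate-or-continue decision when the request is being driven from the poll handler.
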
@@ -16,6 +16,9 @@ enum io_uring_cmd_flags {
         IO_URING_F_SQE128 = 4,
         IO_URING_F_CQE32 = 8,
         IO_URING_F_IOPOLL = 16,
+
+        /* the request is executed from poll, it should not be freed */
+        IO_URING_F_MULTISHOT = 32,
 };
 
 struct io_uring_cmd {
@@ -1768,7 +1768,7 @@ int io_poll_issue(struct io_kiocb *req, bool *locked)
         io_tw_lock(req->ctx, locked);
         if (unlikely(req->task->flags & PF_EXITING))
                 return -EFAULT;
-        return io_issue_sqe(req, IO_URING_F_NONBLOCK);
+        return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
 }
 
 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
@@ -17,8 +17,8 @@ enum {
         IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
 
         /*
-         * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
-         * are set to indicate to the poll runner that multishot should be
+         * Intended only when both IO_URING_F_MULTISHOT is passed
+         * to indicate to the poll runner that multishot should be
          * removed and the result is set on req->cqe.res.
          */
         IOU_STOP_MULTISHOT = -ECANCELED,
@@ -67,8 +67,6 @@ struct io_sr_msg {
         struct io_kiocb *notif;
 };
 
-#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
-
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
         struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -591,7 +589,8 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
  * again (for multishot).
  */
 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
-                                  unsigned int cflags, bool mshot_finished)
+                                  unsigned int cflags, bool mshot_finished,
+                                  unsigned issue_flags)
 {
         if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                 io_req_set_res(req, *ret, cflags);
@@ -614,7 +613,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
         io_req_set_res(req, *ret, cflags);
 
-        if (req->flags & REQ_F_POLLED)
+        if (issue_flags & IO_URING_F_MULTISHOT)
                 *ret = IOU_STOP_MULTISHOT;
         else
                 *ret = IOU_OK;
@@ -773,8 +772,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
         if (ret < min_ret) {
                 if (ret == -EAGAIN && force_nonblock) {
                         ret = io_setup_async_msg(req, kmsg, issue_flags);
-                        if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
-                            IO_APOLL_MULTI_POLLED) {
+                        if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
                                 io_kbuf_recycle(req, issue_flags);
                                 return IOU_ISSUE_SKIP_COMPLETE;
                         }
@@ -803,7 +801,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
         if (kmsg->msg.msg_inq)
                 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
-        if (!io_recv_finish(req, &ret, cflags, mshot_finished))
+        if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
                 goto retry_multishot;
 
         if (mshot_finished) {
@@ -869,7 +867,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
         ret = sock_recvmsg(sock, &msg, flags);
         if (ret < min_ret) {
                 if (ret == -EAGAIN && force_nonblock) {
-                        if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
+                        if (issue_flags & IO_URING_F_MULTISHOT) {
                                 io_kbuf_recycle(req, issue_flags);
                                 return IOU_ISSUE_SKIP_COMPLETE;
                         }
@@ -902,7 +900,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
         if (msg.msg_inq)
                 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
-        if (!io_recv_finish(req, &ret, cflags, ret <= 0))
+        if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
                 goto retry_multishot;
 
         return ret;
@@ -1289,8 +1287,7 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
                          * return EAGAIN to arm the poll infra since it
                          * has already been done
                          */
-                        if ((req->flags & IO_APOLL_MULTI_POLLED) ==
-                            IO_APOLL_MULTI_POLLED)
+                        if (issue_flags & IO_URING_F_MULTISHOT)
                                 ret = IOU_ISSUE_SKIP_COMPLETE;
                         return ret;
                 }
@@ -1315,9 +1312,7 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
                 goto retry;
 
         io_req_set_res(req, ret, 0);
-        if (req->flags & REQ_F_POLLED)
-                return IOU_STOP_MULTISHOT;
-        return IOU_OK;
+        return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
 }
 
 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
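
The io_recvmsg()/io_recv() hunks above pass issue_flags through to io_recv_finish() so the receive path can tell whether it is being driven from the poll handler rather than testing request flags. The user-visible feature behind them is multishot receive with buffer selection; a rough liburing sketch (assumptions: liburing 2.3 or newer, a connected sock_fd, and a provided-buffer group BGID already registered, for example with io_uring_prep_provide_buffers()):

#include <liburing.h>
#include <stdio.h>

#define BGID 0  /* arbitrary buffer group id, assumed to be populated already */

/* One multishot recv SQE; every CQE borrows a buffer from group BGID and
 * the request stays armed while IORING_CQE_F_MORE is set. */
static int recv_multishot(struct io_uring *ring, int sock_fd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        io_uring_prep_recv_multishot(sqe, sock_fd, NULL, 0, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = BGID;
        io_uring_submit(ring);

        for (;;) {
                ret = io_uring_wait_cqe(ring, &cqe);
                if (ret < 0)
                        return ret;
                if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER))
                        printf("%d bytes in buffer %u\n", cqe->res,
                               cqe->flags >> IORING_CQE_BUFFER_SHIFT);
                /* error, EOF, or missing MORE flag: the multishot is done */
                if (cqe->res <= 0 || !(cqe->flags & IORING_CQE_F_MORE)) {
                        ret = cqe->res;
                        io_uring_cqe_seen(ring, cqe);
                        return ret;
                }
                io_uring_cqe_seen(ring, cqe);
        }
}
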
@@ -228,6 +228,13 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                         return IOU_POLL_DONE;
                 if (v & IO_POLL_CANCEL_FLAG)
                         return -ECANCELED;
+                /*
+                 * cqe.res contains only events of the first wake up
+                 * and all others are be lost. Redo vfs_poll() to get
+                 * up to date state.
+                 */
+                if ((v & IO_POLL_REF_MASK) != 1)
+                        req->cqe.res = 0;
 
                 /* the mask was stashed in __io_poll_execute */
                 if (!req->cqe.res) {
@@ -239,6 +246,8 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                         continue;
                 if (req->apoll_events & EPOLLONESHOT)
                         return IOU_POLL_DONE;
+                if (io_is_uring_fops(req->file))
+                        return IOU_POLL_DONE;
 
                 /* multishot, just fill a CQE and proceed */
                 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
@@ -258,6 +267,9 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                         return ret;
                 }
 
+                /* force the next iteration to vfs_poll() */
+                req->cqe.res = 0;
+
                 /*
                  * Release all references, retry if someone tried to restart
                  * task_work while we were executing it.
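
The io_is_uring_fops() check above addresses polling an io_uring file itself, for example a multishot poll armed on the ring's own fd: posting the poll CQE makes the ring readable again, which would retrigger the poll and post another CQE indefinitely. Such a request is now completed after the first event instead of being kept armed. The userspace setup that used to self-propel looks roughly like this (liburing sketch, illustrative only):

#include <liburing.h>
#include <poll.h>

/* Arm a multishot poll on the ring's own fd. After this merge the kernel
 * completes it on the first event rather than keeping it armed forever. */
static int poll_own_ring(struct io_uring *ring)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_poll_multishot(sqe, ring->ring_fd, POLLIN);
        return io_uring_submit(ring);
}
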