Commit 2c41fab1 authored by Linus Torvalds

Merge tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block

Pull io_uring followup fixes from Jens Axboe:

 - The SIGSTOP change from Eric, so we properly ignore that for
   PF_IO_WORKER threads.

 - Disallow sending signals to PF_IO_WORKER threads in general, we're
   not interested in having them funnel back to the io_uring owning
   task.

 - Stable fix from Stefan, ensuring we properly break links for short
   send/sendmsg and recv/recvmsg if MSG_WAITALL is set (see the sketch
   after the commit metadata below).

 - Catch and loop when needing to run task_work before a PF_IO_WORKER
   thread goes to sleep.

* tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block:
  io_uring: call req_set_fail_links() on short send[msg]()/recv[msg]() with MSG_WAITALL
  io-wq: ensure task is running before processing task_work
  signal: don't allow STOP on PF_IO_WORKER threads
  signal: don't allow sending any signals to PF_IO_WORKER threads
parents 1d4345eb 0031275d
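
For context on the MSG_WAITALL fix, here is a minimal userspace sketch. It is
not part of this series: it assumes liburing, an already-connected stream
socket sockfd, a ring with room for two SQEs, and an illustrative helper name.
A MSG_WAITALL recv is linked to a follow-up send; with the fix, a short recv
fails the link, so the dependent send is expected to complete with -ECANCELED
instead of echoing a partially filled buffer.

/*
 * Sketch only: assumes liburing and a connected stream socket "sockfd".
 */
#include <liburing.h>
#include <sys/socket.h>
#include <stdio.h>

static int recv_then_echo(struct io_uring *ring, int sockfd,
                          void *buf, size_t len)
{
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int i, ret;

        /* recv the full buffer (MSG_WAITALL), with the send linked to it */
        sqe = io_uring_get_sqe(ring);
        io_uring_prep_recv(sqe, sockfd, buf, len, MSG_WAITALL);
        sqe->flags |= IOSQE_IO_LINK;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_send(sqe, sockfd, buf, len, 0);

        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;

        for (i = 0; i < 2; i++) {
                ret = io_uring_wait_cqe(ring, &cqe);
                if (ret < 0)
                        return ret;
                /* a broken link shows up as -ECANCELED in the second CQE */
                printf("cqe %d: res=%d\n", i, cqe->res);
                io_uring_cqe_seen(ring, cqe);
        }
        return 0;
}

A caller would set the ring up with io_uring_queue_init(); the echo pattern
and function name above are illustrative only.
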
fs/io-wq.c:

@@ -386,13 +386,16 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
         return NULL;
 }
 
-static void io_flush_signals(void)
+static bool io_flush_signals(void)
 {
         if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+                __set_current_state(TASK_RUNNING);
                 if (current->task_works)
                         task_work_run();
                 clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+                return true;
         }
+        return false;
 }
 
 static void io_assign_current_work(struct io_worker *worker,
@@ -499,7 +502,8 @@ static int io_wqe_worker(void *data)
                 }
                 __io_worker_idle(wqe, worker);
                 raw_spin_unlock_irq(&wqe->lock);
-                io_flush_signals();
+                if (io_flush_signals())
+                        continue;
                 ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
                 if (try_to_freeze() || ret)
                         continue;
fs/io_uring.c:

@@ -4386,6 +4386,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
         struct io_async_msghdr iomsg, *kmsg;
         struct socket *sock;
         unsigned flags;
+        int min_ret = 0;
         int ret;
 
         sock = sock_from_file(req->file);
@@ -4406,6 +4407,9 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
         else if (issue_flags & IO_URING_F_NONBLOCK)
                 flags |= MSG_DONTWAIT;
 
+        if (flags & MSG_WAITALL)
+                min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
         ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
         if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
                 return io_setup_async_msg(req, kmsg);
@@ -4416,7 +4420,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
         if (kmsg->free_iov)
                 kfree(kmsg->free_iov);
         req->flags &= ~REQ_F_NEED_CLEANUP;
-        if (ret < 0)
+        if (ret < min_ret)
                 req_set_fail_links(req);
         __io_req_complete(req, issue_flags, ret, 0);
         return 0;
@@ -4429,6 +4433,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
         struct iovec iov;
         struct socket *sock;
         unsigned flags;
+        int min_ret = 0;
         int ret;
 
         sock = sock_from_file(req->file);
@@ -4450,6 +4455,9 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
         else if (issue_flags & IO_URING_F_NONBLOCK)
                 flags |= MSG_DONTWAIT;
 
+        if (flags & MSG_WAITALL)
+                min_ret = iov_iter_count(&msg.msg_iter);
+
         msg.msg_flags = flags;
         ret = sock_sendmsg(sock, &msg);
         if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
@@ -4457,7 +4465,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
         if (ret == -ERESTARTSYS)
                 ret = -EINTR;
 
-        if (ret < 0)
+        if (ret < min_ret)
                 req_set_fail_links(req);
         __io_req_complete(req, issue_flags, ret, 0);
         return 0;
@@ -4609,6 +4617,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
         struct socket *sock;
         struct io_buffer *kbuf;
         unsigned flags;
+        int min_ret = 0;
         int ret, cflags = 0;
         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4640,6 +4649,9 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
         else if (force_nonblock)
                 flags |= MSG_DONTWAIT;
 
+        if (flags & MSG_WAITALL)
+                min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
         ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
                                  kmsg->uaddr, flags);
         if (force_nonblock && ret == -EAGAIN)
@@ -4653,7 +4665,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
         if (kmsg->free_iov)
                 kfree(kmsg->free_iov);
         req->flags &= ~REQ_F_NEED_CLEANUP;
-        if (ret < 0)
+        if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
                 req_set_fail_links(req);
         __io_req_complete(req, issue_flags, ret, cflags);
         return 0;
@@ -4668,6 +4680,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
         struct socket *sock;
         struct iovec iov;
         unsigned flags;
+        int min_ret = 0;
         int ret, cflags = 0;
         bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4699,6 +4712,9 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
         else if (force_nonblock)
                 flags |= MSG_DONTWAIT;
 
+        if (flags & MSG_WAITALL)
+                min_ret = iov_iter_count(&msg.msg_iter);
+
         ret = sock_recvmsg(sock, &msg, flags);
         if (force_nonblock && ret == -EAGAIN)
                 return -EAGAIN;
@@ -4707,7 +4723,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 out_free:
         if (req->flags & REQ_F_BUFFER_SELECTED)
                 cflags = io_put_recv_kbuf(req);
-        if (ret < 0)
+        if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
                 req_set_fail_links(req);
         __io_req_complete(req, issue_flags, ret, cflags);
         return 0;
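
The failure test added in the four hunks above reduces to a single predicate.
As a hedged restatement in plain C (the helper name and signature below are
illustrative, not kernel code): a request fails when it transfers less than
min_ret, where min_ret is the full iterator length under MSG_WAITALL and 0
otherwise; on the receive side, truncation of the payload or control part also
fails it.

#include <stdbool.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Illustrative restatement of the new req_set_fail_links() condition. */
static bool waitall_req_failed(ssize_t ret, size_t want, int flags,
                               int msg_flags_out, bool is_recv)
{
        ssize_t min_ret = (flags & MSG_WAITALL) ? (ssize_t)want : 0;

        if (ret < min_ret)
                return true;
        /* recv side: a truncated payload or control part also fails */
        if (is_recv && (flags & MSG_WAITALL) &&
            (msg_flags_out & (MSG_TRUNC | MSG_CTRUNC)))
                return true;
        return false;
}
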
kernel/signal.c:

@@ -288,7 +288,8 @@ bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
                         JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
         BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 
-        if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+        if (unlikely(fatal_signal_pending(task) ||
+                     (task->flags & (PF_EXITING | PF_IO_WORKER))))
                 return false;
 
         if (mask & JOBCTL_STOP_SIGMASK)
@@ -833,6 +834,9 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info,
         if (!valid_signal(sig))
                 return -EINVAL;
 
+        /* PF_IO_WORKER threads don't take any signals */
+        if (t->flags & PF_IO_WORKER)
+                return -ESRCH;
+
         if (!si_fromuser(info))
                 return 0;
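
Seen from userspace, the signal-side change means an io_uring worker can no
longer be stopped or signalled directly. The sketch below is a hedged
illustration, not from this series; it assumes a 5.12-era kernel where workers
show up as "iou-wrk-*" threads under /proc/self/task, a process that currently
has such workers, and an illustrative helper name. It walks the thread list
and tries to SIGSTOP each worker; with check_kill_permission() rejecting
PF_IO_WORKER targets, tgkill() is expected to fail with ESRCH rather than stop
the worker.

#define _GNU_SOURCE
#include <dirent.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void try_stop_io_workers(void)
{
        DIR *dir = opendir("/proc/self/task");
        struct dirent *de;
        char path[256], comm[64];

        if (!dir)
                return;
        while ((de = readdir(dir)) != NULL) {
                FILE *f;

                if (de->d_name[0] == '.')
                        continue;
                snprintf(path, sizeof(path), "/proc/self/task/%s/comm",
                         de->d_name);
                f = fopen(path, "r");
                if (!f)
                        continue;
                if (fgets(comm, sizeof(comm), f) &&
                    strncmp(comm, "iou-wrk", 7) == 0) {
                        pid_t tid = (pid_t)atoi(de->d_name);
                        int ret = syscall(SYS_tgkill, getpid(), tid, SIGSTOP);

                        /* expected after this change: ret == -1, errno == ESRCH */
                        printf("tgkill(%d, SIGSTOP) = %d (errno=%d)\n",
                               tid, ret, errno);
                }
                fclose(f);
        }
        closedir(dir);
}
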