Commit ac787ffa authored by Linus Torvalds

Merge tag 'io_uring-6.2-2022-12-29' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Two fixes for mutex grabbing when the task state is != TASK_RUNNING
   (me)

 - Check for invalid opcode in io_uring_register() a bit earlier, to
   avoid going through the quiesce machinery just to return -EINVAL
   later in the process (me)

 - Fix for the uapi io_uring header, skipping including time_types.h
   when necessary (Stefan)

* tag 'io_uring-6.2-2022-12-29' of git://git.kernel.dk/linux:
  uapi:io_uring.h: allow linux/time_types.h to be skipped
  io_uring: check for valid register opcode earlier
  io_uring/cancel: re-grab ctx mutex after finishing wait
  io_uring: finish waiting before flushing overflow entries
parents 69fb073b 9eb80340
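
Both task-state fixes below address the same hazard: once prepare_to_wait() has put the task into TASK_INTERRUPTIBLE, calling a sleeping primitive such as mutex_lock() trips the "do not call blocking ops when !TASK_RUNNING" debug warning and can clobber the wait state. A minimal kernel-style sketch of the corrected ordering follows; demo_wq, demo_lock and demo_wait_then_lock() are made-up names for illustration, not code from this merge:

    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static DEFINE_MUTEX(demo_lock);                 /* hypothetical */
    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);        /* hypothetical */

    static void demo_wait_then_lock(void)
    {
            DEFINE_WAIT(wait);

            prepare_to_wait(&demo_wq, &wait, TASK_INTERRUPTIBLE);
            /*
             * Grabbing demo_lock here would be the bug: the task state is
             * TASK_INTERRUPTIBLE, so mutex_lock() may sleep while
             * !TASK_RUNNING and resets the state underneath the waiter.
             */
            schedule();                     /* sleep until something wakes demo_wq */
            finish_wait(&demo_wq, &wait);   /* back to TASK_RUNNING */

            mutex_lock(&demo_lock);         /* safe: lock only after finish_wait() */
            /* ... do locked work ... */
            mutex_unlock(&demo_lock);
    }

The cancel and overflow-flush hunks below restructure their loops along these lines: drop the mutex before waiting, and only re-take it once finish_wait() has restored TASK_RUNNING.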
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -10,7 +10,15 @@
 #include <linux/fs.h>
 #include <linux/types.h>
+/*
+ * this file is shared with liburing and that has to autodetect
+ * if linux/time_types.h is available or not, it can
+ * define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
+ * if linux/time_types.h is not available
+ */
+#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
 #include <linux/time_types.h>
+#endif
 
 #ifdef __cplusplus
 extern "C" {
 #endif
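The guard added above is meant for out-of-tree consumers of the uapi header. As a sketch of how a build without linux/time_types.h might use it (HAVE_LINUX_TIME_TYPES_H stands in for a hypothetical configure-time probe; it is not defined by the kernel):

    /* Hypothetical userspace consumer of the uapi header. */
    #if !defined(HAVE_LINUX_TIME_TYPES_H)   /* assumed configure-time probe */
    #define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
    #endif
    #include <linux/io_uring.h>

liburing does its own autodetection, as the comment in the header notes; the only contract the kernel side provides is the UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H guard itself.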
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -288,24 +288,23 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
 
                ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
 
+               mutex_unlock(&ctx->uring_lock);
                if (ret != -EALREADY)
                        break;
 
-               mutex_unlock(&ctx->uring_lock);
                ret = io_run_task_work_sig(ctx);
-               if (ret < 0) {
-                       mutex_lock(&ctx->uring_lock);
+               if (ret < 0)
                        break;
-               }
                ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
-               mutex_lock(&ctx->uring_lock);
                if (!ret) {
                        ret = -ETIME;
                        break;
                }
+               mutex_lock(&ctx->uring_lock);
        } while (1);
 
        finish_wait(&ctx->cq_wait, &wait);
+       mutex_lock(&ctx->uring_lock);
 
        if (ret == -ENOENT || ret > 0)
                ret = 0;
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -677,16 +677,20 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
        io_cq_unlock_post(ctx);
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
-       if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-               /* iopoll syncs against uring_lock, not completion_lock */
-               if (ctx->flags & IORING_SETUP_IOPOLL)
-                       mutex_lock(&ctx->uring_lock);
-               __io_cqring_overflow_flush(ctx);
-               if (ctx->flags & IORING_SETUP_IOPOLL)
-                       mutex_unlock(&ctx->uring_lock);
-       }
+       /* iopoll syncs against uring_lock, not completion_lock */
+       if (ctx->flags & IORING_SETUP_IOPOLL)
+               mutex_lock(&ctx->uring_lock);
+       __io_cqring_overflow_flush(ctx);
+       if (ctx->flags & IORING_SETUP_IOPOLL)
+               mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+{
+       if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+               io_cqring_do_overflow_flush(ctx);
 }
 
 void __io_put_task(struct task_struct *task, int nr)
@@ -2549,7 +2553,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        trace_io_uring_cqring_wait(ctx, min_events);
 
        do {
-               io_cqring_overflow_flush(ctx);
+               if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
+                       finish_wait(&ctx->cq_wait, &iowq.wq);
+                       io_cqring_do_overflow_flush(ctx);
+               }
                prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
                ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
@@ -4013,8 +4020,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                return -EEXIST;
 
        if (ctx->restricted) {
-               if (opcode >= IORING_REGISTER_LAST)
-                       return -EINVAL;
                opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
                if (!test_bit(opcode, ctx->restrictions.register_op))
                        return -EACCES;
@@ -4170,6 +4175,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
        long ret = -EBADF;
        struct fd f;
 
+       if (opcode >= IORING_REGISTER_LAST)
+               return -EINVAL;
+
        f = fdget(fd);
        if (!f.file)
                return -EBADF;
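
The register change moves the range check ahead of any per-ring work: an out-of-range opcode is now rejected in the syscall entry point before fdget() and before the quiesce path runs, while the restricted path keeps array_index_nospec() to clamp the already-validated index against speculation. A rough sketch of the same validate-early pattern in isolation, with made-up names (MY_OP_LAST, my_register()):

    #include <linux/errno.h>
    #include <linux/nospec.h>

    #define MY_OP_LAST      8       /* hypothetical number of opcodes */

    static long my_register(unsigned int opcode)
    {
            /* Reject bad opcodes before any expensive work (quiesce, fdget, ...). */
            if (opcode >= MY_OP_LAST)
                    return -EINVAL;

            /* Clamp the validated index so speculation cannot run past the table. */
            opcode = array_index_nospec(opcode, MY_OP_LAST);

            /* ... dispatch on opcode ... */
            return 0;
    }

In the merge itself, the early check lives in SYSCALL_DEFINE4(io_uring_register, ...) and the array_index_nospec() clamp stays in the restricted path of __io_uring_register(), as the hunks above show.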