Commit 4e326358 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: optimise SQPOLL mm/files grabbing

There are two reasons for this. The first is to optimise
io_sq_thread_acquire_mm_files() for the non-SQPOLL case, which currently
does too many checks and function calls in the hot path, e.g. in
io_init_req().

The second is to not grab mm/files when they are not needed. As
__io_queue_sqe() issues only one request now, we can reuse
io_sq_thread_acquire_mm_files() instead of unconditionally acquiring
mm/files.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d3d7298d
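
The shape of the change is easy to see in isolation: a cheap inline wrapper does the fast checks in the hot path and only falls through to the heavier helper when the ring actually runs in SQPOLL mode. The snippet below is a minimal standalone sketch of that pattern, not the kernel code; the names (sample_ring, sample_req, slow_acquire_mm_files, acquire_mm_files) are illustrative stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct sample_ring { bool sqpoll; };	/* stand-in for io_ring_ctx */
struct sample_req  { int opcode; };	/* stand-in for io_kiocb */

/* Heavier slow path: in the kernel this is roughly where the
 * task_lock()/mmget_not_zero() style work happens to borrow the
 * submitter's mm and files. */
static int slow_acquire_mm_files(struct sample_ring *ring, struct sample_req *req)
{
	(void)ring;
	(void)req;
	return 0;
}

/* Cheap inline fast path, analogous to the new wrapper added below:
 * bail out early if the task is exiting, do nothing for non-SQPOLL
 * rings, and only then pay for the slow path. */
static inline int acquire_mm_files(struct sample_ring *ring, struct sample_req *req,
				   bool task_exiting)
{
	if (task_exiting)
		return -EFAULT;
	if (!ring->sqpoll)
		return 0;
	return slow_acquire_mm_files(ring, req);
}

int main(void)
{
	struct sample_ring ring = { .sqpoll = false };
	struct sample_req req = { .opcode = 0 };

	/* Non-SQPOLL hot path: returns immediately, no heavy work. */
	printf("%d\n", acquire_mm_files(&ring, &req, false));
	return 0;
}
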
@@ -1145,9 +1145,6 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
-	if (current->flags & PF_EXITING)
-		return -EFAULT;
-
 	if (!current->files) {
 		struct files_struct *files;
 		struct nsproxy *nsproxy;
@@ -1175,15 +1172,9 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	struct mm_struct *mm;
 
-	if (current->flags & PF_EXITING)
-		return -EFAULT;
 	if (current->mm)
 		return 0;
 
-	/* Should never happen */
-	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
-		return -EFAULT;
-
 	task_lock(ctx->sqo_task);
 	mm = ctx->sqo_task->mm;
 	if (unlikely(!mm || !mmget_not_zero(mm)))
@@ -1198,8 +1189,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 	return -EFAULT;
 }
 
-static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
-					 struct io_kiocb *req)
+static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+					   struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 	int ret;
@@ -1219,6 +1210,16 @@ static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
 	return 0;
 }
 
+static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+						struct io_kiocb *req)
+{
+	if (unlikely(current->flags & PF_EXITING))
+		return -EFAULT;
+	if (!(ctx->flags & IORING_SETUP_SQPOLL))
+		return 0;
+	return __io_sq_thread_acquire_mm_files(ctx, req);
+}
+
 static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
 					 struct cgroup_subsys_state **cur_css)
@@ -2336,9 +2337,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	mutex_lock(&ctx->uring_lock);
-	if (!ctx->sqo_dead &&
-	    !__io_sq_thread_acquire_mm(ctx) &&
-	    !__io_sq_thread_acquire_files(ctx))
+	if (!ctx->sqo_dead && !io_sq_thread_acquire_mm_files(ctx, req))
 		__io_queue_sqe(req);
 	else
 		__io_req_task_cancel(req, -EFAULT);