Commit 27dc8338 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: use non-intrusive list for defer

The only remaining user of req->list is DRAIN, so instead of keeping a
separate per-request list for it, use old-fashioned non-intrusive list
entries allocated on demand. DRAIN is a really slow path anyway, so the
extra allocation is fine.

This removes req->list and so sheds 16 bytes from io_kiocb.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7d6ddea6
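
The gist of the change: an intrusive list embeds its link node in every object, so each io_kiocb pays for two pointers whether or not the request is ever drained, while a non-intrusive list allocates a small wrapper node only when a request actually has to be deferred. The userspace C sketch below illustrates that trade-off; the names list_node, request, defer_entry and defer_request are illustrative stand-ins for the kernel's list_head, io_kiocb, io_defer_entry and the defer path, not actual kernel code.

#include <stdlib.h>

/* Minimal doubly-linked list node, standing in for the kernel's list_head. */
struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_append(struct list_node *node, struct list_node *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* Intrusive style: every object carries the link, used or not. */
struct request_intrusive {
	int data;
	struct list_node list;	/* two pointers (16 bytes on 64-bit) per object */
};

/* Non-intrusive style: the object stays small; a wrapper node is allocated
 * only for the rare defer path, mirroring struct io_defer_entry. */
struct request {
	int data;
};

struct defer_entry {
	struct list_node list;	/* kept as the first member, see the cast below */
	struct request *req;
};

static int defer_request(struct list_node *defer_list, struct request *req)
{
	struct defer_entry *de = malloc(sizeof(*de));

	if (!de)
		return -1;
	de->req = req;
	list_append(&de->list, defer_list);
	return 0;
}

int main(void)
{
	struct list_node defer_list;
	struct request req = { .data = 42 };

	list_init(&defer_list);
	if (defer_request(&defer_list, &req))
		return 1;

	/* Drain the list, freeing each wrapper as __io_queue_deferred() does
	 * with kfree(de); the cast works because 'list' is the first member. */
	while (defer_list.next != &defer_list) {
		struct defer_entry *de = (struct defer_entry *)defer_list.next;

		defer_list.next = de->list.next;
		de->list.next->prev = &defer_list;
		/* process de->req here */
		free(de);
	}
	return 0;
}
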
@@ -641,7 +641,6 @@ struct io_kiocb {
 	u16				buf_index;
 
 	struct io_ring_ctx	*ctx;
-	struct list_head	list;
 	unsigned int		flags;
 	refcount_t		refs;
 	struct task_struct	*task;
@@ -676,6 +675,11 @@ struct io_kiocb {
 	struct callback_head	task_work;
 };
 
+struct io_defer_entry {
+	struct list_head	list;
+	struct io_kiocb		*req;
+};
+
 #define IO_IOPOLL_BATCH			8
 
 struct io_comp_state {
@@ -1234,14 +1238,15 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx)
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
 	do {
-		struct io_kiocb *req = list_first_entry(&ctx->defer_list,
-							struct io_kiocb, list);
+		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
+						struct io_defer_entry, list);
 
-		if (req_need_defer(req))
+		if (req_need_defer(de->req))
 			break;
-		list_del_init(&req->list);
+		list_del_init(&de->list);
 		/* punt-init is done before queueing for defer */
-		__io_queue_async_work(req);
+		__io_queue_async_work(de->req);
+		kfree(de);
 	} while (!list_empty(&ctx->defer_list));
 }
 
@@ -5394,6 +5399,7 @@ static int io_req_defer_prep(struct io_kiocb *req,
 static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_defer_entry *de;
 	int ret;
 
 	/* Still need defer if there is pending req in defer list. */
@@ -5408,15 +5414,20 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return ret;
 	}
 	io_prep_async_link(req);
+	de = kmalloc(sizeof(*de), GFP_KERNEL);
+	if (!de)
+		return -ENOMEM;
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
+		kfree(de);
 		return 0;
 	}
 
 	trace_io_uring_defer(ctx, req, req->user_data);
-	list_add_tail(&req->list, &ctx->defer_list);
+	de->req = req;
+	list_add_tail(&de->list, &ctx->defer_list);
 	spin_unlock_irq(&ctx->completion_lock);
 	return -EIOCBQUEUED;
 }
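
A detail worth noting in the io_req_defer() hunk: the io_defer_entry is allocated with GFP_KERNEL before completion_lock is taken, since a GFP_KERNEL allocation may sleep and sleeping is not allowed while holding the irq-disabling spinlock; the entry is then simply freed again on the early-return path that concludes no deferral is needed.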