Commit bb943b82 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: use inflight_entry instead of compl.list

req->compl.list is used to cache freed requests, and so can't overlap in
time with req->inflight_entry. So, use inflight_entry to link requests
and remove compl.list.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e430e79d22d70a190d718831bda7bfed1daf8976.1628536684.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7255834e
...@@ -670,7 +670,6 @@ struct io_unlink { ...@@ -670,7 +670,6 @@ struct io_unlink {
struct io_completion { struct io_completion {
struct file *file; struct file *file;
struct list_head list;
u32 cflags; u32 cflags;
}; };
...@@ -1670,7 +1669,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res, ...@@ -1670,7 +1669,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
} }
io_dismantle_req(req); io_dismantle_req(req);
io_put_task(req->task, 1); io_put_task(req->task, 1);
list_add(&req->compl.list, &ctx->locked_free_list); list_add(&req->inflight_entry, &ctx->locked_free_list);
ctx->locked_free_nr++; ctx->locked_free_nr++;
} else { } else {
if (!percpu_ref_tryget(&ctx->refs)) if (!percpu_ref_tryget(&ctx->refs))
...@@ -1761,9 +1760,9 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) ...@@ -1761,9 +1760,9 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
nr = state->free_reqs; nr = state->free_reqs;
while (!list_empty(&cs->free_list)) { while (!list_empty(&cs->free_list)) {
struct io_kiocb *req = list_first_entry(&cs->free_list, struct io_kiocb *req = list_first_entry(&cs->free_list,
struct io_kiocb, compl.list); struct io_kiocb, inflight_entry);
list_del(&req->compl.list); list_del(&req->inflight_entry);
state->reqs[nr++] = req; state->reqs[nr++] = req;
if (nr == ARRAY_SIZE(state->reqs)) if (nr == ARRAY_SIZE(state->reqs))
break; break;
...@@ -1837,7 +1836,7 @@ static void __io_free_req(struct io_kiocb *req) ...@@ -1837,7 +1836,7 @@ static void __io_free_req(struct io_kiocb *req)
io_put_task(req->task, 1); io_put_task(req->task, 1);
spin_lock_irqsave(&ctx->completion_lock, flags); spin_lock_irqsave(&ctx->completion_lock, flags);
list_add(&req->compl.list, &ctx->locked_free_list); list_add(&req->inflight_entry, &ctx->locked_free_list);
ctx->locked_free_nr++; ctx->locked_free_nr++;
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
...@@ -2144,7 +2143,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, ...@@ -2144,7 +2143,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
if (state->free_reqs != ARRAY_SIZE(state->reqs)) if (state->free_reqs != ARRAY_SIZE(state->reqs))
state->reqs[state->free_reqs++] = req; state->reqs[state->free_reqs++] = req;
else else
list_add(&req->compl.list, &state->comp.free_list); list_add(&req->inflight_entry, &state->comp.free_list);
} }
static void io_submit_flush_completions(struct io_ring_ctx *ctx) static void io_submit_flush_completions(struct io_ring_ctx *ctx)
...@@ -8626,8 +8625,8 @@ static void io_req_cache_free(struct list_head *list) ...@@ -8626,8 +8625,8 @@ static void io_req_cache_free(struct list_head *list)
{ {
struct io_kiocb *req, *nxt; struct io_kiocb *req, *nxt;
list_for_each_entry_safe(req, nxt, list, compl.list) { list_for_each_entry_safe(req, nxt, list, inflight_entry) {
list_del(&req->compl.list); list_del(&req->inflight_entry);
kmem_cache_free(req_cachep, req); kmem_cache_free(req_cachep, req);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment