Commit 33cc89a9 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: add io_disarm_next() helper

A preparation patch placing all preparations before extracting a next
request into a separate helper io_disarm_next().

Also, don't spuriously do ev_posted in a rare case where REQ_F_FAIL_LINK
is set but there are no requests linked (i.e. after cancelling a linked
timeout or setting IOSQE_IO_LINK on a last request of a submission
batch).
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/44ecff68d6b47e1c4e6b891bdde1ddc08cfc3590.1615250156.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 97a73a0f
......@@ -1705,15 +1705,11 @@ static inline void io_remove_next_linked(struct io_kiocb *req)
nxt->link = NULL;
}
static void io_kill_linked_timeout(struct io_kiocb *req)
static bool io_kill_linked_timeout(struct io_kiocb *req)
__must_hold(&req->ctx->completion_lock)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link;
struct io_kiocb *link = req->link;
bool cancelled = false;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
link = req->link;
/*
* Can happen if a linked timeout fired and link had been like
......@@ -1728,50 +1724,48 @@ static void io_kill_linked_timeout(struct io_kiocb *req)
ret = hrtimer_try_to_cancel(&io->timer);
if (ret != -1) {
io_cqring_fill_event(link, -ECANCELED);
io_commit_cqring(ctx);
io_put_req_deferred(link, 1);
cancelled = true;
}
}
req->flags &= ~REQ_F_LINK_TIMEOUT;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (cancelled) {
io_cqring_ev_posted(ctx);
io_put_req(link);
}
return cancelled;
}
/*
 * Cancel the whole chain of requests linked after @req: each linked request
 * gets a -ECANCELED CQE and a deferred put of both of its references.
 *
 * Caller holds ->completion_lock (see __must_hold); committing the CQ ring
 * and the io_cqring_ev_posted() notification are the caller's job — this
 * commit moved that out of here (see io_disarm_next()/__io_req_find_next()).
 *
 * NOTE(review): the scraped diff had the pre-image locking lines interleaved
 * here; this is the post-image body reconstructed from the visible new lines.
 */
static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *nxt, *link = req->link;

	/* detach the chain from @req first */
	req->link = NULL;
	while (link) {
		nxt = link->link;
		link->link = NULL;

		trace_io_uring_fail_link(req, link);
		io_cqring_fill_event(link, -ECANCELED);
		/* deferred put of 2 refs per cancelled linked request */
		io_put_req_deferred(link, 2);
		link = nxt;
	}
}
/*
 * Disarm @req's linked-request machinery before extracting the next request:
 * cancel a still-armed linked timeout, and if the request is marked failed,
 * cancel the entire chain hanging off req->link.
 *
 * Returns true if any CQE was filled, telling the caller it must commit the
 * CQ ring and call io_cqring_ev_posted() after dropping the lock. Checking
 * req->link != NULL here avoids a spurious ev_posted when REQ_F_FAIL_LINK is
 * set but nothing is actually linked (the case fixed by this commit).
 */
static bool io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	bool posted = false;

	if (likely(req->flags & REQ_F_LINK_TIMEOUT))
		posted = io_kill_linked_timeout(req);
	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
		/* io_fail_links() fills one CQE per linked request, if any */
		posted |= (req->link != NULL);
		io_fail_links(req);
	}
	return posted;
}
/*
 * Detach and return the next request linked after @req, or NULL.
 *
 * The completion_lock is only taken on the slow path — when a linked timeout
 * must be cancelled or the chain must be failed — and io_commit_cqring() /
 * io_cqring_ev_posted() run only if io_disarm_next() actually filled a CQE.
 *
 * NOTE(review): the scraped diff interleaved the pre-image branches here;
 * this is the post-image body reconstructed from the visible new lines (one
 * comment line hidden by the hunk header was rewritten).
 */
static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;
		bool posted;

		spin_lock_irqsave(&ctx->completion_lock, flags);
		posted = io_disarm_next(req);
		if (posted)
			io_commit_cqring(req->ctx);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
		/* wake CQ waiters only if a CQE was actually posted */
		if (posted)
			io_cqring_ev_posted(ctx);
	}
	nxt = req->link;
	req->link = NULL;
	return nxt;
}
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment