Commit 8e6971a8 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: optimise submission loop invariant

Instead of keeping @submitted in io_submit_sqes(), which for each
iteration requires comparison with the initial number of SQEs, store the
number of SQEs left to submit. We'll need nr only for when we're done
with SQE handling.

note: if we can't allocate a req for the first SQE, we have always
returned -EAGAIN to userspace; preserve this behaviour by looking into
the cache in the slow path.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c3b3df9aeae4c2f7a53fd8386385742e4e261e77.1649771823.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent fa05457a
...@@ -7855,24 +7855,22 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) ...@@ -7855,24 +7855,22 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
__must_hold(&ctx->uring_lock) __must_hold(&ctx->uring_lock)
{ {
unsigned int entries = io_sqring_entries(ctx); unsigned int entries = io_sqring_entries(ctx);
int submitted = 0; unsigned int left;
int ret;
if (unlikely(!entries)) if (unlikely(!entries))
return 0; return 0;
/* make sure SQ entry isn't read before tail */ /* make sure SQ entry isn't read before tail */
nr = min3(nr, ctx->sq_entries, entries); ret = left = min3(nr, ctx->sq_entries, entries);
io_get_task_refs(nr); io_get_task_refs(left);
io_submit_state_start(&ctx->submit_state, left);
io_submit_state_start(&ctx->submit_state, nr);
do { do {
const struct io_uring_sqe *sqe; const struct io_uring_sqe *sqe;
struct io_kiocb *req; struct io_kiocb *req;
if (unlikely(!io_alloc_req_refill(ctx))) { if (unlikely(!io_alloc_req_refill(ctx)))
if (!submitted)
submitted = -EAGAIN;
break; break;
}
req = io_alloc_req(ctx); req = io_alloc_req(ctx);
sqe = io_get_sqe(ctx); sqe = io_get_sqe(ctx);
if (unlikely(!sqe)) { if (unlikely(!sqe)) {
...@@ -7880,7 +7878,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) ...@@ -7880,7 +7878,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
break; break;
} }
/* will complete beyond this point, count as submitted */ /* will complete beyond this point, count as submitted */
submitted++; left--;
if (io_submit_sqe(ctx, req, sqe)) { if (io_submit_sqe(ctx, req, sqe)) {
/* /*
* Continue submitting even for sqe failure if the * Continue submitting even for sqe failure if the
...@@ -7889,20 +7887,20 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) ...@@ -7889,20 +7887,20 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
if (!(ctx->flags & IORING_SETUP_SUBMIT_ALL)) if (!(ctx->flags & IORING_SETUP_SUBMIT_ALL))
break; break;
} }
} while (submitted < nr); } while (left);
if (unlikely(submitted != nr)) { if (unlikely(left)) {
int ref_used = (submitted == -EAGAIN) ? 0 : submitted; ret -= left;
int unused = nr - ref_used; /* try again if it submitted nothing and can't allocate a req */
if (!ret && io_req_cache_empty(ctx))
current->io_uring->cached_refs += unused; ret = -EAGAIN;
current->io_uring->cached_refs += left;
} }
io_submit_state_end(ctx); io_submit_state_end(ctx);
/* Commit SQ ring head once we've consumed and submitted all SQEs */ /* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx); io_commit_sqring(ctx);
return ret;
return submitted;
} }
static inline bool io_sqd_events_pending(struct io_sq_data *sqd) static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment