Commit 2b85edfc authored by Pavel Begunkov, committed by Jens Axboe

io_uring: batch getting pcpu references

percpu_ref_tryget() has its own overhead. Instead of getting a reference
for each request, grab a bunch once per io_submit_sqes().

~5% throughput boost for a "submit and wait 128 nops" benchmark.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>

__io_req_free_empty() -> __io_req_do_free()
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4e5ef023
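For context, the pattern the patch applies, shown as a minimal sketch rather than the actual fs/io_uring.c code: take nr references with a single percpu_ref_tryget_many() call, let every request that is actually submitted keep one of them (dropped later with percpu_ref_put() when the request is freed), and hand back the leftovers in one percpu_ref_put_many() call. submit_one_req() below is a hypothetical placeholder for the per-request work; the percpu_ref_* helpers are the existing percpu-refcount API the patch relies on.

/*
 * Sketch of the batched percpu_ref pattern, assuming kernel context.
 * submit_one_req() is a hypothetical stand-in for per-request submission.
 */
#include <linux/percpu-refcount.h>

static int submit_one_req(void);	/* hypothetical; returns 0 on success */

static int submit_batch(struct percpu_ref *refs, unsigned int nr)
{
	unsigned int submitted = 0;

	/* one tryget for the whole batch instead of one per request */
	if (!percpu_ref_tryget_many(refs, nr))
		return -EAGAIN;

	while (submitted < nr) {
		if (submit_one_req())
			break;
		submitted++;
	}

	/* return the references that were grabbed but never used */
	if (submitted != nr)
		percpu_ref_put_many(refs, nr - submitted);

	/*
	 * Each submitted request still owns one reference; it is dropped
	 * with percpu_ref_put() when that request is freed.
	 */
	return submitted;
}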
@@ -1083,9 +1083,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
-	if (!percpu_ref_tryget(&ctx->refs))
-		return NULL;
-
 	if (!state) {
 		req = kmem_cache_alloc(req_cachep, gfp);
 		if (unlikely(!req))
@@ -1145,6 +1142,14 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 	}
 }
 
+static void __io_req_do_free(struct io_kiocb *req)
+{
+	if (likely(!io_is_fallback_req(req)))
+		kmem_cache_free(req_cachep, req);
+	else
+		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+}
+
 static void __io_free_req(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1166,11 +1171,9 @@ static void __io_free_req(struct io_kiocb *req)
 		wake_up(&ctx->inflight_wait);
 		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
 	}
-	percpu_ref_put(&ctx->refs);
-	if (likely(!io_is_fallback_req(req)))
-		kmem_cache_free(req_cachep, req);
-	else
-		clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+
+	percpu_ref_put(&req->ctx->refs);
+	__io_req_do_free(req);
 }
 
 static bool io_link_cancel_timeout(struct io_kiocb *req)
@@ -4539,6 +4542,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		return -EBUSY;
 	}
 
+	if (!percpu_ref_tryget_many(&ctx->refs, nr))
+		return -EAGAIN;
+
 	if (nr > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, nr);
 		statep = &state;
@@ -4555,7 +4561,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 		}
 		if (!io_get_sqring(ctx, req, &sqe)) {
-			__io_free_req(req);
+			__io_req_do_free(req);
 			break;
 		}
@@ -4586,6 +4592,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			break;
 	}
 
+	if (submitted != nr)
+		percpu_ref_put_many(&ctx->refs, nr - submitted);
 	if (link)
 		io_queue_link_head(link);
 	if (statep)