Commit 9a10867a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: add task-refs-get helper

Task referencing has become more complicated: apart from taking normal
task references it also takes tctx->inflight and caches all of that, so
it is a good idea to isolate it all in helpers.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d9114d037f1c195897aa13f38a496078eca2afdb.1630023531.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a8295b98
@@ -1670,6 +1670,24 @@ static inline void io_put_task(struct task_struct *task, int nr)
 	}
 }
 
+static void io_task_refs_refill(struct io_uring_task *tctx)
+{
+	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
+
+	percpu_counter_add(&tctx->inflight, refill);
+	refcount_add(refill, &current->usage);
+	tctx->cached_refs += refill;
+}
+
+static inline void io_get_task_refs(int nr)
+{
+	struct io_uring_task *tctx = current->io_uring;
+
+	tctx->cached_refs -= nr;
+	if (unlikely(tctx->cached_refs < 0))
+		io_task_refs_refill(tctx);
+}
+
 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 				     long res, unsigned int cflags)
 {
@@ -6890,25 +6908,15 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	__must_hold(&ctx->uring_lock)
 {
-	struct io_uring_task *tctx;
 	int submitted = 0;
 
 	/* make sure SQ entry isn't read before tail */
 	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
+	io_get_task_refs(nr);
 
-	tctx = current->io_uring;
-	tctx->cached_refs -= nr;
-	if (unlikely(tctx->cached_refs < 0)) {
-		unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
-
-		percpu_counter_add(&tctx->inflight, refill);
-		refcount_add(refill, &current->usage);
-		tctx->cached_refs += refill;
-	}
 	io_submit_state_start(&ctx->submit_state, nr);
 	while (submitted < nr) {
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
...
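
The pattern is easier to follow outside the diff. Below is a minimal, single-threaded user-space sketch of the same batched-reference idea, with C11 atomics standing in for the kernel's percpu_counter and refcount; every name in it (shared_refs, cached_refs, REF_CACHE_NR, get_refs) is illustrative and not taken from the kernel.

/*
 * Minimal sketch of the batched reference-caching pattern above.
 * cached_refs plays the role of tctx->cached_refs (private to one
 * task, so no atomics needed); shared_refs stands in for the
 * expensive shared counters (tctx->inflight, current->usage).
 */
#include <stdatomic.h>
#include <stdio.h>

#define REF_CACHE_NR 128		/* role of IO_TCTX_REFS_CACHE_NR */

static atomic_int shared_refs;		/* expensive, shared between tasks */
static int cached_refs;			/* cheap, private to this task */

/* Slow path: top the cache up with one operation on the shared counter. */
static void ref_cache_refill(void)
{
	int refill = -cached_refs + REF_CACHE_NR;

	atomic_fetch_add(&shared_refs, refill);
	cached_refs += refill;
}

/* Fast path: usually just a subtraction on a private integer. */
static void get_refs(int nr)
{
	cached_refs -= nr;
	if (cached_refs < 0)
		ref_cache_refill();
}

int main(void)
{
	get_refs(5);	/* cache empty: refills 5 + REF_CACHE_NR into the cache */
	get_refs(5);	/* served entirely from the local cache */
	printf("shared=%d cached=%d\n",
	       atomic_load(&shared_refs), cached_refs);
	return 0;
}

The design point is that the common path never touches shared state: the shared counters are updated once per REF_CACHE_NR references taken, rather than once per submission batch.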