Commit 5c3462cf authored by Jens Axboe

io_uring: store io_identity in io_uring_task

This is, by definition, a per-task structure. So store it in the
task context instead of carrying it in each io_kiocb. We're still
a bit inefficient when members have changed, as that requires
allocating and copying a new io_identity struct. The next patch
will fix that up.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1e6fa521
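The scheme the message describes boils down to a small refcounting pattern. Below is a minimal userspace C sketch, not kernel code: all names (identity, task_ctx, request, req_identity_cow, req_put_identity) are illustrative stand-ins for the structures touched by this patch. Each request starts out pointing at the identity embedded in its per-task context; only when a member must change does it get a private, refcounted copy (the alloc-and-copy cost mentioned above), and the embedded per-task identity itself is never freed.

#include <stdlib.h>

/* Stand-in for io_identity: per-task state a request may need to capture. */
struct identity {
	int refcount;		/* only meaningful for privately allocated copies */
	unsigned long fsize;	/* example member; stands in for creds, fs, mm, ... */
};

/* Stand-in for io_uring_task: owns one embedded identity for the common case. */
struct task_ctx {
	struct identity identity;
};

/* Stand-in for io_kiocb: points at the shared identity or a private copy. */
struct request {
	struct task_ctx *task;
	struct identity *identity;
};

static void req_init(struct request *req, struct task_ctx *tctx)
{
	req->task = tctx;
	req->identity = &tctx->identity;	/* default: share, no allocation */
}

/* Copy-on-write: give the request a private identity before changing a member. */
static int req_identity_cow(struct request *req)
{
	struct identity *id = malloc(sizeof(*id));

	if (!id)
		return -1;
	*id = *req->identity;
	id->refcount = 1;
	req->identity = id;
	return 0;
}

/* Drop the request's identity; the embedded per-task one is never freed. */
static void req_put_identity(struct request *req)
{
	if (req->identity == &req->task->identity)
		return;
	if (--req->identity->refcount == 0)
		free(req->identity);
}

int main(void)
{
	struct task_ctx tctx = { .identity = { .fsize = 1024 } };
	struct request req;

	req_init(&req, &tctx);
	req_identity_cow(&req);		/* request now owns a copy it may modify */
	req.identity->fsize = 4096;
	req_put_identity(&req);		/* frees the copy; tctx.identity untouched */
	return 0;
}

The same invariant is visible in the diff below: io_put_identity() returns early when the request still points at tctx->identity, so only privately allocated copies are ever reference-counted and kfree()'d.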
fs/io_uring.c
@@ -689,7 +689,6 @@ struct io_kiocb {
 	struct hlist_node	hash_node;
 	struct async_poll	*apoll;
 	struct io_wq_work	work;
-	struct io_identity	identity;
 };
 
 struct io_defer_entry {
@@ -1072,8 +1071,7 @@ static inline void io_req_init_async(struct io_kiocb *req)
 
 	memset(&req->work, 0, sizeof(req->work));
 	req->flags |= REQ_F_WORK_INITIALIZED;
-	io_init_identity(&req->identity);
-	req->work.identity = &req->identity;
+	req->work.identity = &current->io_uring->identity;
 }
 
 static inline bool io_async_submit(struct io_ring_ctx *ctx)
@@ -1179,9 +1177,9 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 	}
 }
 
-static void io_put_identity(struct io_kiocb *req)
+static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
 {
-	if (req->work.identity == &req->identity)
+	if (req->work.identity == &tctx->identity)
 		return;
 	if (refcount_dec_and_test(&req->work.identity->count))
 		kfree(req->work.identity);
@@ -1220,7 +1218,7 @@ static void io_req_clean_work(struct io_kiocb *req)
 		req->work.flags &= ~IO_WQ_WORK_FS;
 	}
 
-	io_put_identity(req);
+	io_put_identity(req->task->io_uring, req);
 }
 
 /*
@@ -1229,6 +1227,7 @@ static void io_req_clean_work(struct io_kiocb *req)
  */
 static bool io_identity_cow(struct io_kiocb *req)
 {
+	struct io_uring_task *tctx = current->io_uring;
 	const struct cred *creds = NULL;
 	struct io_identity *id;
 
@@ -1255,7 +1254,7 @@ static bool io_identity_cow(struct io_kiocb *req)
 	refcount_inc(&id->count);
 
 	/* drop old identity, assign new one. one ref for req, one for tctx */
-	if (req->work.identity != &req->identity &&
+	if (req->work.identity != &tctx->identity &&
 	    refcount_sub_and_test(2, &req->work.identity->count))
 		kfree(req->work.identity);
 
@@ -1266,7 +1265,7 @@ static bool io_identity_cow(struct io_kiocb *req)
 static bool io_grab_identity(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
-	struct io_identity *id = &req->identity;
+	struct io_identity *id = req->work.identity;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (def->needs_fsize && id->fsize != rlimit(RLIMIT_FSIZE))
@@ -1330,10 +1329,11 @@ static bool io_grab_identity(struct io_kiocb *req)
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
-	struct io_identity *id = &req->identity;
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_identity *id;
 
 	io_req_init_async(req);
+	id = req->work.identity;
 
 	if (req->flags & REQ_F_ISREG) {
 		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
@@ -6481,7 +6481,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		if (unlikely(!iod))
 			return -EINVAL;
 		refcount_inc(&iod->count);
-		io_put_identity(req);
+		io_put_identity(current->io_uring, req);
 		get_cred(iod->creds);
 		req->work.identity = iod;
 		req->work.flags |= IO_WQ_WORK_CREDS;
@@ -7691,6 +7691,7 @@ static int io_uring_alloc_task_context(struct task_struct *task)
 	tctx->in_idle = 0;
 	atomic_long_set(&tctx->req_issue, 0);
 	atomic_long_set(&tctx->req_complete, 0);
+	io_init_identity(&tctx->identity);
 	task->io_uring = tctx;
 	return 0;
 }
include/linux/io_uring.h
@@ -24,6 +24,7 @@ struct io_uring_task {
 	struct wait_queue_head	wait;
 	struct file		*last;
 	atomic_long_t		req_issue;
+	struct io_identity	identity;
 
 	/* completion side */
 	bool			in_idle ____cacheline_aligned_in_smp;