Commit 4edf20f9 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: dig out COMP_LOCK from deep call chain

io_req_clean_work() checks REQ_F_COMP_LOCKED so it can pass this information
two layers up. Move the check up into __io_free_req(), so at least it doesn't
look so ugly and will facilitate further changes.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6a0af224
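
The shape of the change is easy to see in isolation: io_req_clean_work() used to detect REQ_F_COMP_LOCKED deep in the chain and return a bool so that __io_free_req() knew to defer the teardown; after the patch the flag is checked once at the top and the helpers return void. Below is a minimal, standalone C sketch of that pattern only, using hypothetical names (struct obj, FLAG_LOCKED, clean_work(), dismantle(), free_obj()), not the actual io_uring code:

/*
 * Sketch of "dig the flag check out of a deep call chain": the outermost
 * caller checks the flag itself, so the inner helpers return void instead
 * of passing a "defer this" bool upward.
 */
#include <stdio.h>

#define FLAG_LOCKED	(1U << 0)

struct obj {
	unsigned int flags;
};

/* After the change: helpers no longer report anything upward. */
static void clean_work(struct obj *o)
{
	/* stand-in for releasing per-request work state */
	printf("clean_work: flags=%#x\n", o->flags);
}

static void dismantle(struct obj *o)
{
	/* drop references, then clean up work state */
	clean_work(o);
}

static void free_obj_finish(struct obj *o)
{
	printf("freed object with flags=%#x\n", o->flags);
}

/* The flag check now lives at the single top-level call site. */
static void free_obj(struct obj *o)
{
	if (!(o->flags & FLAG_LOCKED)) {
		dismantle(o);
		free_obj_finish(o);
	} else {
		/* stand-in for deferring the teardown, e.g. to task context */
		printf("deferring teardown\n");
	}
}

int main(void)
{
	struct obj a = { .flags = 0 };
	struct obj b = { .flags = FLAG_LOCKED };

	free_obj(&a);	/* immediate teardown */
	free_obj(&b);	/* deferred */
	return 0;
}

Keeping the check at one call site leaves the teardown helpers free of any completion-lock knowledge, which is what makes the "further changes" mentioned in the message easier.
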
@@ -1181,14 +1181,10 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 	}
 }
 
-/*
- * Returns true if we need to defer file table putting. This can only happen
- * from the error path with REQ_F_COMP_LOCKED set.
- */
-static bool io_req_clean_work(struct io_kiocb *req)
+static void io_req_clean_work(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_WORK_INITIALIZED))
-		return false;
+		return;
 
 	req->flags &= ~REQ_F_WORK_INITIALIZED;
 
@@ -1207,9 +1203,6 @@ static bool io_req_clean_work(struct io_kiocb *req)
 	if (req->work.fs) {
 		struct fs_struct *fs = req->work.fs;
 
-		if (req->flags & REQ_F_COMP_LOCKED)
-			return true;
-
 		spin_lock(&req->work.fs->lock);
 		if (--fs->users)
 			fs = NULL;
@@ -1218,8 +1211,6 @@ static bool io_req_clean_work(struct io_kiocb *req)
 			free_fs_struct(fs);
 		req->work.fs = NULL;
 	}
-
-	return false;
 }
 
 static void io_prep_async_work(struct io_kiocb *req)
@@ -1699,7 +1690,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
 		fput(file);
 }
 
-static bool io_dismantle_req(struct io_kiocb *req)
+static void io_dismantle_req(struct io_kiocb *req)
 {
 	io_clean_op(req);
 
@@ -1708,7 +1699,7 @@ static bool io_dismantle_req(struct io_kiocb *req)
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
 
-	return io_req_clean_work(req);
+	io_req_clean_work(req);
 }
 
 static void __io_free_req_finish(struct io_kiocb *req)
@@ -1731,21 +1722,15 @@ static void __io_free_req_finish(struct io_kiocb *req)
 static void io_req_task_file_table_put(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-	struct fs_struct *fs = req->work.fs;
 
-	spin_lock(&req->work.fs->lock);
-	if (--fs->users)
-		fs = NULL;
-	spin_unlock(&req->work.fs->lock);
-	if (fs)
-		free_fs_struct(fs);
-	req->work.fs = NULL;
+	io_dismantle_req(req);
 	__io_free_req_finish(req);
 }
 
 static void __io_free_req(struct io_kiocb *req)
 {
-	if (!io_dismantle_req(req)) {
+	if (!(req->flags & REQ_F_COMP_LOCKED)) {
+		io_dismantle_req(req);
 		__io_free_req_finish(req);
 	} else {
 		int ret;
@@ -2057,7 +2042,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 	}
 	rb->task_refs++;
 
-	WARN_ON_ONCE(io_dismantle_req(req));
+	io_dismantle_req(req);
 	rb->reqs[rb->to_free++] = req;
 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
 		__io_req_free_batch_flush(req->ctx, rb);