Commit 5ea5dd45 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: inline io_read()'s iovec freeing

io_read()'s control flow is far from the simplest, with a lot of jumps, and
it's hard to read. One of those jumps targets the out_free: label, which
frees the iovec. However, iovec is NULL'ed in the middle of io_read(), so
for all later paths kfree(iovec) is a no-op. That leaves only two places
where the free is actually needed, so we can inline it there and further
clean up the code.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7335e3bf
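
For illustration, here is a minimal standalone sketch of the pattern the diff below applies: a shared out_free: cleanup label replaced by freeing inline at each exit that can still hold a live buffer. The do_read_before()/do_read_after() helpers are hypothetical stand-ins, not io_uring code; userspace free() is used in place of kfree(), and both accept NULL as a no-op.

/*
 * Illustrative sketch only -- hypothetical helpers, not kernel code.
 * Before: every exit detours through a single out_free: label.
 * After: each early exit frees inline and the label disappears.
 */
#include <stdlib.h>
#include <string.h>

static int do_read_before(char *dst, size_t len)
{
        char *buf = malloc(len);
        int ret = -1;

        if (!buf)
                return -1;
        if (len == 0)                   /* stand-in for a failing verify step */
                goto out_free;
        memset(buf, 'x', len);          /* stand-in for the actual read */
        memcpy(dst, buf, len);
        ret = 0;
out_free:
        free(buf);                      /* like kfree(), free(NULL) is a no-op */
        return ret;
}

static int do_read_after(char *dst, size_t len)
{
        char *buf = malloc(len);

        if (!buf)
                return -1;
        if (len == 0) {                 /* error path frees inline */
                free(buf);
                return -1;
        }
        memset(buf, 'x', len);
        memcpy(dst, buf, len);
        free(buf);                      /* success path frees inline too */
        return 0;
}

int main(void)
{
        char dst[16];

        return do_read_before(dst, sizeof(dst)) | do_read_after(dst, sizeof(dst));
}

Both variants behave identically; inlining merely removes the jump to shared cleanup code, which is what this commit does to io_read().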
@@ -3530,14 +3530,18 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	}
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
-	if (unlikely(ret))
-		goto out_free;
+	if (unlikely(ret)) {
+		kfree(iovec);
+		return ret;
+	}
 
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
+		/* it's faster to check here then delegate to kfree */
+		if (iovec)
+			kfree(iovec);
+		return 0;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -3560,8 +3564,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		return ret2;
 
 	rw = req->async_data;
-	/* it's copied and will be cleaned with ->io */
-	iovec = NULL;
 	/* now use our persistent iterator, if we aren't already */
 	iter = &rw->iter;
 retry:
@@ -3580,21 +3582,14 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	 * do, then just retry at the new offset.
 	 */
 	ret = io_iter_do_read(req, iter);
-	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
-	} else if (ret > 0 && ret < io_size) {
-		/* we got some bytes, but not all. retry. */
+	if (ret == -EIOCBQUEUED)
+		return 0;
+	/* we got some bytes, but not all. retry. */
+	if (ret > 0 && ret < io_size)
 		goto retry;
-	}
 done:
 	kiocb_done(kiocb, ret, cs);
-	ret = 0;
-out_free:
-	/* it's reportedly faster than delegating the null check to kfree() */
-	if (iovec)
-		kfree(iovec);
-	return ret;
+	return 0;
 }
 
 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)