Commit 8452fd0c authored by Jens Axboe

io_uring: cleanup io_import_iovec() of pre-mapped request

io_rw_prep_async() goes through a dance of clearing req->io, calling
the iovec import, then re-setting req->io. Provide an internal helper
that does the right thing without needing state tweaked to get there.

This enables further cleanups in io_read, io_write, and
io_resubmit_prep(), but that's left for another time.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3b2a4439
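
The "dance" the message describes can be modeled outside the kernel. Below is a minimal standalone C sketch of the pattern; the struct layouts are stand-ins, not the real io_uring definitions, and only the control flow mirrors the change. It contrasts the old clear/import/restore sequence with the new internal helper. The actual kernel diff follows.

/*
 * Standalone sketch of the pattern this commit introduces. The types
 * below are stand-ins, not the real io_uring definitions.
 */
#include <stdio.h>
#include <sys/types.h>

struct io_async_rw { size_t cached_len; };       /* stand-in async state */
struct io_kiocb    { struct io_async_rw *io; };  /* stand-in request */

/* Internal helper: always does the actual import work. */
static ssize_t __import(struct io_kiocb *req, size_t len)
{
	(void)req;
	return (ssize_t)len;	/* pretend we imported 'len' bytes */
}

/* Public entry: short-circuits when async state is already attached. */
static ssize_t import(struct io_kiocb *req, size_t len)
{
	if (!req->io)
		return __import(req, len);
	return (ssize_t)req->io->cached_len;
}

/* Old prep: had to clear and restore req->io around the import. */
static ssize_t prep_old(struct io_kiocb *req, struct io_async_rw *iorw,
			size_t len)
{
	ssize_t ret;

	req->io = NULL;		/* defeat the short-circuit */
	ret = import(req, len);
	req->io = iorw;		/* put the state back */
	return ret;
}

/* New prep: calls the internal helper directly, no state tweaks. */
static ssize_t prep_new(struct io_kiocb *req, size_t len)
{
	return __import(req, len);
}

int main(void)
{
	struct io_async_rw iorw = { .cached_len = 0 };
	struct io_kiocb req = { .io = &iorw };

	printf("old: %zd, new: %zd\n",
	       prep_old(&req, &iorw, 4096), prep_new(&req, 4096));
	return 0;
}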
@@ -2819,7 +2819,7 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 	return __io_iov_buffer_select(req, iov, needs_lock);
 }
 
-static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
 				 struct iovec **iovec, struct iov_iter *iter,
 				 bool needs_lock)
 {
@@ -2828,13 +2828,6 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	ssize_t ret;
 	u8 opcode;
 
-	if (req->io) {
-		struct io_async_rw *iorw = &req->io->rw;
-
-		*iovec = NULL;
-		return iov_iter_count(&iorw->iter);
-	}
-
 	opcode = req->opcode;
 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 		*iovec = NULL;
@@ -2879,6 +2872,16 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
 }
 
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+			       struct iovec **iovec, struct iov_iter *iter,
+			       bool needs_lock)
+{
+	if (!req->io)
+		return __io_import_iovec(rw, req, iovec, iter, needs_lock);
+	*iovec = NULL;
+	return iov_iter_count(&req->io->rw.iter);
+}
+
 /*
  * For files that don't have ->read_iter() and ->write_iter(), handle them
  * by looping over ->read() or ->write() manually.
@@ -3001,11 +3004,8 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
 	ssize_t ret;
 
 	iorw->iter.iov = iorw->fast_iov;
-	/* reset ->io around the iovec import, we don't want to use it */
-	req->io = NULL;
-	ret = io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
+	ret = __io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
 			      &iorw->iter, !force_nonblock);
-	req->io = container_of(iorw, struct io_async_ctx, rw);
 	if (unlikely(ret < 0))
 		return ret;
...
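
The split follows a common kernel idiom: the double-underscore helper performs the raw work without checking preconditions, while the unprefixed wrapper remains the general-purpose entry point that honors state cached in req->io. Callers such as io_rw_prep_async(), which are in the middle of populating that state, can now call __io_import_iovec() directly instead of temporarily NULLing req->io around the call.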