Commit 51aac424 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: optimise io_import_iovec nonblock passing

First, change IO_URING_F_NONBLOCK to take the sign bit of the int, so
checking for it can be turned into a test + sign-based jump. This makes
the binary smaller and may be faster.
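
As a rough illustration (a minimal, standalone C sketch with made-up
names, not the kernel's actual definitions), a flag placed in the int's
sign bit lets the compiler turn the check into a plain sign test:

    #include <limits.h>

    enum cmd_flags {
            F_COMPLETE_DEFER = 1,
            F_NONBLOCK       = INT_MIN,   /* occupies the int's sign bit */
    };

    /* The result depends only on the sign bit of 'flags', so compilers
     * can emit a sign-based jump (e.g. test + js on x86) instead of a
     * mask + compare. */
    static inline int is_nonblock(unsigned int flags)
    {
            return (flags & F_NONBLOCK) != 0;
    }

This is also why the leftover force_nonblock assignment mentioned in the
note below costs nothing: testing it collapses into the same sign check
on issue_flags.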

Then, instead of passing a need_lock boolean into io_import_iovec(),
just give it issue_flags, which the callers already have at hand. This
saves some stack space, a couple of test + cmov operations, and other
conversions.
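
A compressed sketch of the resulting convention (types and names here
are illustrative; the real call sites are in the diff below): callers
forward issue_flags untouched, and only the leaf helper derives the
boolean it needs.

    #include <limits.h>
    #include <stdbool.h>

    enum cmd_flags { F_COMPLETE_DEFER = 1, F_NONBLOCK = INT_MIN };

    struct request { int buf_index; };

    /* Old shape (for contrast): every caller computed and threaded a
     * bool through:
     *   static void buffer_select(struct request *req, bool needs_lock);
     */

    /* New shape: the flags word is passed as-is; the callee derives
     * needs_lock locally, so intermediate callers keep no extra bool. */
    static void buffer_select(struct request *req, unsigned int issue_flags)
    {
            bool needs_lock = !(issue_flags & F_NONBLOCK);

            (void)req;
            (void)needs_lock;   /* would guard taking the lock here */
    }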

Note: we still keep the
    force_nonblock = issue_flags & IO_URING_F_NONBLOCK
variable, but the compiler optimises it out into testing issue_flags
directly.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ee96547e692f6c975c229cd82fc721679571a734.1634144845.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c88598a9
@@ -198,8 +198,9 @@ struct io_rings {
 };
 
 enum io_uring_cmd_flags {
-	IO_URING_F_NONBLOCK		= 1,
-	IO_URING_F_COMPLETE_DEFER	= 2,
+	IO_URING_F_COMPLETE_DEFER	= 1,
+	/* int's last bit, sign checks are usually faster than a bit test */
+	IO_URING_F_NONBLOCK		= INT_MIN,
 };
 
 struct io_mapped_ubuf {
@@ -3037,10 +3038,11 @@ static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
 }
 
 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
-					  int bgid, bool needs_lock)
+					  int bgid, unsigned int issue_flags)
 {
 	struct io_buffer *kbuf = req->kbuf;
 	struct io_buffer *head;
+	bool needs_lock = !(issue_flags & IO_URING_F_NONBLOCK);
 
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return kbuf;
@@ -3072,13 +3074,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
 }
 
 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
-					bool needs_lock)
+					unsigned int issue_flags)
 {
 	struct io_buffer *kbuf;
 	u16 bgid;
 
 	bgid = req->buf_index;
-	kbuf = io_buffer_select(req, len, bgid, needs_lock);
+	kbuf = io_buffer_select(req, len, bgid, issue_flags);
 	if (IS_ERR(kbuf))
 		return kbuf;
 	return u64_to_user_ptr(kbuf->addr);
@@ -3086,7 +3088,7 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
 
 #ifdef CONFIG_COMPAT
 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
-				bool needs_lock)
+				unsigned int issue_flags)
 {
 	struct compat_iovec __user *uiov;
 	compat_ssize_t clen;
@@ -3102,7 +3104,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
 		return -EINVAL;
 
 	len = clen;
-	buf = io_rw_buffer_select(req, &len, needs_lock);
+	buf = io_rw_buffer_select(req, &len, issue_flags);
 	if (IS_ERR(buf))
 		return PTR_ERR(buf);
 	iov[0].iov_base = buf;
@@ -3112,7 +3114,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
 #endif
 
 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
-				      bool needs_lock)
+				      unsigned int issue_flags)
 {
 	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
 	void __user *buf;
@@ -3124,7 +3126,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 		len = iov[0].iov_len;
 	if (len < 0)
 		return -EINVAL;
-	buf = io_rw_buffer_select(req, &len, needs_lock);
+	buf = io_rw_buffer_select(req, &len, issue_flags);
 	if (IS_ERR(buf))
 		return PTR_ERR(buf);
 	iov[0].iov_base = buf;
@@ -3133,7 +3135,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 }
 
 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
-				    bool needs_lock)
+				    unsigned int issue_flags)
 {
 	if (req->flags & REQ_F_BUFFER_SELECTED) {
 		struct io_buffer *kbuf = req->kbuf;
@@ -3147,14 +3149,14 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 
 #ifdef CONFIG_COMPAT
 	if (req->ctx->compat)
-		return io_compat_import(req, iov, needs_lock);
+		return io_compat_import(req, iov, issue_flags);
 #endif
 
-	return __io_iov_buffer_select(req, iov, needs_lock);
+	return __io_iov_buffer_select(req, iov, issue_flags);
 }
 
 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
-			   struct iov_iter *iter, bool needs_lock)
+			   struct iov_iter *iter, unsigned int issue_flags)
 {
 	void __user *buf = u64_to_user_ptr(req->rw.addr);
 	size_t sqe_len = req->rw.len;
@@ -3172,7 +3174,7 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
 
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
 		if (req->flags & REQ_F_BUFFER_SELECT) {
-			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
+			buf = io_rw_buffer_select(req, &sqe_len, issue_flags);
 			if (IS_ERR(buf))
 				return PTR_ERR(buf);
 			req->rw.len = sqe_len;
@@ -3184,7 +3186,7 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
 	}
 
 	if (req->flags & REQ_F_BUFFER_SELECT) {
-		ret = io_iov_buffer_select(req, *iovec, needs_lock);
+		ret = io_iov_buffer_select(req, *iovec, issue_flags);
 		if (!ret)
 			iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
 		*iovec = NULL;
@@ -3323,7 +3325,8 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 	struct iovec *iov = iorw->s.fast_iov;
 	int ret;
 
-	ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false);
+	/* submission path, ->uring_lock should already be taken */
+	ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, IO_URING_F_NONBLOCK);
 	if (unlikely(ret < 0))
 		return ret;
 
@@ -3451,7 +3454,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	} else {
 		s = &__s;
 		iovec = s->fast_iov;
-		ret = io_import_iovec(READ, req, &iovec, &s->iter, !force_nonblock);
+		ret = io_import_iovec(READ, req, &iovec, &s->iter, issue_flags);
 		if (ret < 0)
 			return ret;
 		iov_iter_save_state(&s->iter, &s->iter_state);
@@ -3579,7 +3582,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	} else {
 		s = &__s;
 		iovec = s->fast_iov;
-		ret = io_import_iovec(WRITE, req, &iovec, &s->iter, !force_nonblock);
+		ret = io_import_iovec(WRITE, req, &iovec, &s->iter, issue_flags);
 		if (ret < 0)
 			return ret;
 		iov_iter_save_state(&s->iter, &s->iter_state);
@@ -4902,11 +4905,11 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 }
 
 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
-					       bool needs_lock)
+					       unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = &req->sr_msg;
 
-	return io_buffer_select(req, &sr->len, sr->bgid, needs_lock);
+	return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
 }
 
 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
@@ -4969,7 +4972,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	if (req->flags & REQ_F_BUFFER_SELECT) {
-		kbuf = io_recv_buffer_select(req, !force_nonblock);
+		kbuf = io_recv_buffer_select(req, issue_flags);
 		if (IS_ERR(kbuf))
 			return PTR_ERR(kbuf);
 		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
@@ -5021,7 +5024,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 		return -ENOTSOCK;
 
 	if (req->flags & REQ_F_BUFFER_SELECT) {
-		kbuf = io_recv_buffer_select(req, !force_nonblock);
+		kbuf = io_recv_buffer_select(req, issue_flags);
 		if (IS_ERR(kbuf))
 			return PTR_ERR(kbuf);
 		buf = u64_to_user_ptr(kbuf->addr);