Commit 7a7cacba authored by Pavel Begunkov, committed by Jens Axboe

io_uring: indent left {send,recv}[msg]()

Flip the "if (sock)" condition over into an early return on error; the
upper layer will take care of the failure. The change will come in handy
later, but it already removes an extra jump from the hot path.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 06ef3608
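
The pattern applied in all four hunks below is the classic guard-clause flip: instead of nesting the whole operation inside the success branch, fail fast and keep the common path at a single indentation level. A minimal sketch of the before/after shape, not code from the patch itself (op_nested and op_flat are illustrative names):

/* Before: everything nests under the success test. */
static int op_nested(struct io_kiocb *req)
{
        struct socket *sock;
        int ret;

        sock = sock_from_file(req->file, &ret);
        if (sock) {
                /* long body, one extra level of indentation */
                ret = 0;
        }
        return ret;
}

/* After: bail out early; the hot path runs straight through. */
static int op_flat(struct io_kiocb *req)
{
        struct socket *sock;
        int ret;

        sock = sock_from_file(req->file, &ret);
        if (unlikely(!sock))
                return ret;     /* ret was set by sock_from_file() */
        /* same body, dedented one level */
        return 0;
}

The full diff follows.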
@@ -3916,42 +3916,41 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
                       struct io_comp_state *cs)
 {
-        struct io_async_msghdr *kmsg = NULL;
+        struct io_async_msghdr iomsg, *kmsg = NULL;
         struct socket *sock;
+        unsigned flags;
         int ret;
 
         sock = sock_from_file(req->file, &ret);
-        if (sock) {
-                struct io_async_msghdr iomsg;
-                unsigned flags;
+        if (unlikely(!sock))
+                return ret;
 
-                if (req->io) {
-                        kmsg = &req->io->msg;
-                        kmsg->msg.msg_name = &req->io->msg.addr;
-                        /* if iov is set, it's allocated already */
-                        if (!kmsg->iov)
-                                kmsg->iov = kmsg->fast_iov;
-                        kmsg->msg.msg_iter.iov = kmsg->iov;
-                } else {
-                        ret = io_sendmsg_copy_hdr(req, &iomsg);
-                        if (ret)
-                                return ret;
-                        kmsg = &iomsg;
-                }
+        if (req->io) {
+                kmsg = &req->io->msg;
+                kmsg->msg.msg_name = &req->io->msg.addr;
+                /* if iov is set, it's allocated already */
+                if (!kmsg->iov)
+                        kmsg->iov = kmsg->fast_iov;
+                kmsg->msg.msg_iter.iov = kmsg->iov;
+        } else {
+                ret = io_sendmsg_copy_hdr(req, &iomsg);
+                if (ret)
+                        return ret;
+                kmsg = &iomsg;
+        }
 
-                flags = req->sr_msg.msg_flags;
-                if (flags & MSG_DONTWAIT)
-                        req->flags |= REQ_F_NOWAIT;
-                else if (force_nonblock)
-                        flags |= MSG_DONTWAIT;
+        flags = req->sr_msg.msg_flags;
+        if (flags & MSG_DONTWAIT)
+                req->flags |= REQ_F_NOWAIT;
+        else if (force_nonblock)
+                flags |= MSG_DONTWAIT;
 
-                ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
-                if (force_nonblock && ret == -EAGAIN)
-                        return io_setup_async_msg(req, kmsg);
-                if (ret == -ERESTARTSYS)
-                        ret = -EINTR;
-        }
+        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
+        if (force_nonblock && ret == -EAGAIN)
+                return io_setup_async_msg(req, kmsg);
+        if (ret == -ERESTARTSYS)
+                ret = -EINTR;
 
         if (kmsg && kmsg->iov != kmsg->fast_iov)
                 kfree(kmsg->iov);
         req->flags &= ~REQ_F_NEED_CLEANUP;
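
For reference, unlikely() is the kernel's branch-prediction annotation from include/linux/compiler.h: it tells the compiler the error test almost never fires, so the failure path can be laid out out of line and the common case falls through without a taken branch, which is the kind of jump the commit message says it removes from the hot path. The (simplified) definitions:

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)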
@@ -3964,39 +3963,38 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 static int io_send(struct io_kiocb *req, bool force_nonblock,
                    struct io_comp_state *cs)
 {
+        struct io_sr_msg *sr = &req->sr_msg;
+        struct msghdr msg;
+        struct iovec iov;
         struct socket *sock;
+        unsigned flags;
         int ret;
 
         sock = sock_from_file(req->file, &ret);
-        if (sock) {
-                struct io_sr_msg *sr = &req->sr_msg;
-                struct msghdr msg;
-                struct iovec iov;
-                unsigned flags;
+        if (unlikely(!sock))
+                return ret;
 
-                ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
-                                                &msg.msg_iter);
-                if (ret)
-                        return ret;
+        ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
+        if (unlikely(ret))
+                return ret;
 
-                msg.msg_name = NULL;
-                msg.msg_control = NULL;
-                msg.msg_controllen = 0;
-                msg.msg_namelen = 0;
+        msg.msg_name = NULL;
+        msg.msg_control = NULL;
+        msg.msg_controllen = 0;
+        msg.msg_namelen = 0;
 
-                flags = req->sr_msg.msg_flags;
-                if (flags & MSG_DONTWAIT)
-                        req->flags |= REQ_F_NOWAIT;
-                else if (force_nonblock)
-                        flags |= MSG_DONTWAIT;
-                msg.msg_flags = flags;
+        flags = req->sr_msg.msg_flags;
+        if (flags & MSG_DONTWAIT)
+                req->flags |= REQ_F_NOWAIT;
+        else if (force_nonblock)
+                flags |= MSG_DONTWAIT;
+        msg.msg_flags = flags;
 
-                ret = sock_sendmsg(sock, &msg);
-                if (force_nonblock && ret == -EAGAIN)
-                        return -EAGAIN;
-                if (ret == -ERESTARTSYS)
-                        ret = -EINTR;
-        }
+        ret = sock_sendmsg(sock, &msg);
+        if (force_nonblock && ret == -EAGAIN)
+                return -EAGAIN;
+        if (ret == -ERESTARTSYS)
+                ret = -EINTR;
 
         if (ret < 0)
                 req_set_fail_links(req);
@@ -4149,62 +4147,62 @@ static int io_recvmsg_prep(struct io_kiocb *req,
 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
                       struct io_comp_state *cs)
 {
-        struct io_async_msghdr *kmsg = NULL;
+        struct io_async_msghdr iomsg, *kmsg = NULL;
         struct socket *sock;
+        struct io_buffer *kbuf;
+        unsigned flags;
         int ret, cflags = 0;
 
         sock = sock_from_file(req->file, &ret);
-        if (sock) {
-                struct io_buffer *kbuf;
-                struct io_async_msghdr iomsg;
-                unsigned flags;
+        if (unlikely(!sock))
+                return ret;
 
-                if (req->io) {
-                        kmsg = &req->io->msg;
-                        kmsg->msg.msg_name = &req->io->msg.addr;
-                        /* if iov is set, it's allocated already */
-                        if (!kmsg->iov)
-                                kmsg->iov = kmsg->fast_iov;
-                        kmsg->msg.msg_iter.iov = kmsg->iov;
-                } else {
-                        ret = io_recvmsg_copy_hdr(req, &iomsg);
-                        if (ret)
-                                return ret;
-                        kmsg = &iomsg;
-                }
+        if (req->io) {
+                kmsg = &req->io->msg;
+                kmsg->msg.msg_name = &req->io->msg.addr;
+                /* if iov is set, it's allocated already */
+                if (!kmsg->iov)
+                        kmsg->iov = kmsg->fast_iov;
+                kmsg->msg.msg_iter.iov = kmsg->iov;
+        } else {
+                ret = io_recvmsg_copy_hdr(req, &iomsg);
+                if (ret)
+                        return ret;
+                kmsg = &iomsg;
+        }
 
-                kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
-                if (IS_ERR(kbuf)) {
-                        return PTR_ERR(kbuf);
-                } else if (kbuf) {
-                        kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
-                        iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
-                                        1, req->sr_msg.len);
-                }
+        kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
+        if (IS_ERR(kbuf)) {
+                return PTR_ERR(kbuf);
+        } else if (kbuf) {
+                kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
+                iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
+                                1, req->sr_msg.len);
+        }
 
-                flags = req->sr_msg.msg_flags;
-                if (flags & MSG_DONTWAIT)
-                        req->flags |= REQ_F_NOWAIT;
-                else if (force_nonblock)
-                        flags |= MSG_DONTWAIT;
+        flags = req->sr_msg.msg_flags;
+        if (flags & MSG_DONTWAIT)
+                req->flags |= REQ_F_NOWAIT;
+        else if (force_nonblock)
+                flags |= MSG_DONTWAIT;
 
-                ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
-                                                kmsg->uaddr, flags);
-                if (force_nonblock && ret == -EAGAIN) {
-                        ret = io_setup_async_msg(req, kmsg);
-                        if (ret != -EAGAIN)
-                                kfree(kbuf);
-                        return ret;
-                }
-                if (ret == -ERESTARTSYS)
-                        ret = -EINTR;
-                if (kbuf)
-                        kfree(kbuf);
-        }
+        ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
+                                        kmsg->uaddr, flags);
+        if (force_nonblock && ret == -EAGAIN) {
+                ret = io_setup_async_msg(req, kmsg);
+                if (ret != -EAGAIN)
+                        kfree(kbuf);
+                return ret;
+        }
+        if (ret == -ERESTARTSYS)
+                ret = -EINTR;
+        if (kbuf)
+                kfree(kbuf);
 
         if (kmsg && kmsg->iov != kmsg->fast_iov)
                 kfree(kmsg->iov);
         req->flags &= ~REQ_F_NEED_CLEANUP;
         if (ret < 0)
                 req_set_fail_links(req);
         __io_req_complete(req, ret, cflags, cs);
@@ -4215,51 +4213,50 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
                    struct io_comp_state *cs)
 {
         struct io_buffer *kbuf = NULL;
+        struct io_sr_msg *sr = &req->sr_msg;
+        struct msghdr msg;
+        void __user *buf = sr->buf;
         struct socket *sock;
+        struct iovec iov;
+        unsigned flags;
         int ret, cflags = 0;
 
         sock = sock_from_file(req->file, &ret);
-        if (sock) {
-                struct io_sr_msg *sr = &req->sr_msg;
-                void __user *buf = sr->buf;
-                struct msghdr msg;
-                struct iovec iov;
-                unsigned flags;
+        if (unlikely(!sock))
+                return ret;
 
-                kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
-                if (IS_ERR(kbuf))
-                        return PTR_ERR(kbuf);
-                else if (kbuf)
-                        buf = u64_to_user_ptr(kbuf->addr);
+        kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
+        if (IS_ERR(kbuf))
+                return PTR_ERR(kbuf);
+        else if (kbuf)
+                buf = u64_to_user_ptr(kbuf->addr);
 
-                ret = import_single_range(READ, buf, sr->len, &iov,
-                                                &msg.msg_iter);
-                if (ret) {
-                        kfree(kbuf);
-                        return ret;
-                }
+        ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
+        if (unlikely(ret)) {
+                kfree(kbuf);
+                return ret;
+        }
 
-                req->flags |= REQ_F_NEED_CLEANUP;
-                msg.msg_name = NULL;
-                msg.msg_control = NULL;
-                msg.msg_controllen = 0;
-                msg.msg_namelen = 0;
-                msg.msg_iocb = NULL;
-                msg.msg_flags = 0;
+        req->flags |= REQ_F_NEED_CLEANUP;
+        msg.msg_name = NULL;
+        msg.msg_control = NULL;
+        msg.msg_controllen = 0;
+        msg.msg_namelen = 0;
+        msg.msg_iocb = NULL;
+        msg.msg_flags = 0;
 
-                flags = req->sr_msg.msg_flags;
-                if (flags & MSG_DONTWAIT)
-                        req->flags |= REQ_F_NOWAIT;
-                else if (force_nonblock)
-                        flags |= MSG_DONTWAIT;
+        flags = req->sr_msg.msg_flags;
+        if (flags & MSG_DONTWAIT)
+                req->flags |= REQ_F_NOWAIT;
+        else if (force_nonblock)
+                flags |= MSG_DONTWAIT;
 
-                ret = sock_recvmsg(sock, &msg, flags);
-                if (force_nonblock && ret == -EAGAIN)
-                        return -EAGAIN;
-                if (ret == -ERESTARTSYS)
-                        ret = -EINTR;
-        }
+        ret = sock_recvmsg(sock, &msg, flags);
+        if (force_nonblock && ret == -EAGAIN)
+                return -EAGAIN;
+        if (ret == -ERESTARTSYS)
+                ret = -EINTR;
 
         kfree(kbuf);
         req->flags &= ~REQ_F_NEED_CLEANUP;
         if (ret < 0)
...