Commit 50220d6a authored by Jens Axboe's avatar Jens Axboe

io_uring/net: get rid of ->prep_async() for send side

Move the io_async_msghdr out of the issue path and into prep handling,
since it's now done unconditionally and hence does not need to be part
of the issue path. This means io_sendrecv_prep_async() and
io_sendmsg_prep_async() can be removed, and hence the forced async setup
path is now unified with the normal prep setup.
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent c6f32c7d
...@@ -290,50 +290,56 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req, ...@@ -290,50 +290,56 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
return ret; return ret;
} }
int io_sendrecv_prep_async(struct io_kiocb *req) void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
struct io_async_msghdr *io = req->async_data;
kfree(io->free_iov);
}
static int io_send_setup(struct io_kiocb *req)
{ {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io; struct io_async_msghdr *kmsg = req->async_data;
int ret; int ret;
if (req_has_async_data(req)) kmsg->msg.msg_name = NULL;
return 0; kmsg->msg.msg_namelen = 0;
sr->done_io = 0; kmsg->msg.msg_control = NULL;
if (!sr->addr) kmsg->msg.msg_controllen = 0;
return 0; kmsg->msg.msg_ubuf = NULL;
io = io_msg_alloc_async_prep(req);
if (!io) if (sr->addr) {
return -ENOMEM; ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr);
memset(&io->msg, 0, sizeof(io->msg)); if (unlikely(ret < 0))
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &io->msg.msg_iter); return ret;
if (unlikely(ret)) kmsg->msg.msg_name = &kmsg->addr;
kmsg->msg.msg_namelen = sr->addr_len;
}
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
if (unlikely(ret < 0))
return ret; return ret;
io->msg.msg_name = &io->addr;
io->msg.msg_namelen = sr->addr_len; return 0;
return move_addr_to_kernel(sr->addr, sr->addr_len, &io->addr);
} }
int io_sendmsg_prep_async(struct io_kiocb *req) static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
{ {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_async_msghdr *kmsg;
int ret; int ret;
sr->done_io = 0; /* always locked for prep */
if (!io_msg_alloc_async_prep(req)) kmsg = io_msg_alloc_async(req, 0);
if (unlikely(!kmsg))
return -ENOMEM; return -ENOMEM;
ret = io_sendmsg_copy_hdr(req, req->async_data); if (!is_msg)
return io_send_setup(req);
ret = io_sendmsg_copy_hdr(req, kmsg);
if (!ret) if (!ret)
req->flags |= REQ_F_NEED_CLEANUP; req->flags |= REQ_F_NEED_CLEANUP;
return ret; return ret;
} }
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
struct io_async_msghdr *io = req->async_data;
kfree(io->free_iov);
}
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
...@@ -362,7 +368,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -362,7 +368,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (req->ctx->compat) if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT; sr->msg_flags |= MSG_CMSG_COMPAT;
#endif #endif
return 0; return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG);
} }
static void io_req_msg_cleanup(struct io_kiocb *req, static void io_req_msg_cleanup(struct io_kiocb *req,
...@@ -379,7 +385,7 @@ static void io_req_msg_cleanup(struct io_kiocb *req, ...@@ -379,7 +385,7 @@ static void io_req_msg_cleanup(struct io_kiocb *req,
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg; struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock; struct socket *sock;
unsigned flags; unsigned flags;
int min_ret = 0; int min_ret = 0;
...@@ -389,17 +395,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) ...@@ -389,17 +395,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock)) if (unlikely(!sock))
return -ENOTSOCK; return -ENOTSOCK;
if (req_has_async_data(req)) {
kmsg = req->async_data;
} else {
kmsg = io_msg_alloc_async(req, issue_flags);
if (unlikely(!kmsg))
return -ENOMEM;
ret = io_sendmsg_copy_hdr(req, kmsg);
if (ret)
return ret;
}
if (!(req->flags & REQ_F_POLLED) && if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST)) (sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN; return -EAGAIN;
...@@ -437,52 +432,10 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) ...@@ -437,52 +432,10 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK; return IOU_OK;
} }
static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg;
int ret;
if (req_has_async_data(req)) {
kmsg = req->async_data;
} else {
kmsg = io_msg_alloc_async(req, issue_flags);
if (unlikely(!kmsg))
return ERR_PTR(-ENOMEM);
kmsg->msg.msg_name = NULL;
kmsg->msg.msg_namelen = 0;
kmsg->msg.msg_control = NULL;
kmsg->msg.msg_controllen = 0;
kmsg->msg.msg_ubuf = NULL;
if (sr->addr) {
ret = move_addr_to_kernel(sr->addr, sr->addr_len,
&kmsg->addr);
if (unlikely(ret < 0))
return ERR_PTR(ret);
kmsg->msg.msg_name = &kmsg->addr;
kmsg->msg.msg_namelen = sr->addr_len;
}
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
&kmsg->msg.msg_iter);
if (unlikely(ret))
return ERR_PTR(ret);
}
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return ERR_PTR(-EAGAIN);
return kmsg;
}
int io_send(struct io_kiocb *req, unsigned int issue_flags) int io_send(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg; struct io_async_msghdr *kmsg = req->async_data;
size_t len = sr->len;
struct socket *sock; struct socket *sock;
unsigned flags; unsigned flags;
int min_ret = 0; int min_ret = 0;
...@@ -492,13 +445,9 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) ...@@ -492,13 +445,9 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock)) if (unlikely(!sock))
return -ENOTSOCK; return -ENOTSOCK;
kmsg = io_send_setup(req, issue_flags); if (!(req->flags & REQ_F_POLLED) &&
if (IS_ERR(kmsg)) (sr->flags & IORING_RECVSEND_POLL_FIRST))
return PTR_ERR(kmsg); return -EAGAIN;
ret = import_ubuf(ITER_SOURCE, sr->buf, len, &kmsg->msg.msg_iter);
if (unlikely(ret))
return ret;
flags = sr->msg_flags; flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK) if (issue_flags & IO_URING_F_NONBLOCK)
...@@ -1084,7 +1033,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1084,7 +1033,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (req->ctx->compat) if (req->ctx->compat)
zc->msg_flags |= MSG_CMSG_COMPAT; zc->msg_flags |= MSG_CMSG_COMPAT;
#endif #endif
return 0; return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
} }
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb, static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
...@@ -1173,7 +1122,7 @@ static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg) ...@@ -1173,7 +1122,7 @@ static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg; struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock; struct socket *sock;
unsigned msg_flags; unsigned msg_flags;
int ret, min_ret = 0; int ret, min_ret = 0;
...@@ -1184,9 +1133,9 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) ...@@ -1184,9 +1133,9 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
return -EOPNOTSUPP; return -EOPNOTSUPP;
kmsg = io_send_setup(req, issue_flags); if (!(req->flags & REQ_F_POLLED) &&
if (IS_ERR(kmsg)) (zc->flags & IORING_RECVSEND_POLL_FIRST))
return PTR_ERR(kmsg); return -EAGAIN;
if (!zc->done_io) { if (!zc->done_io) {
ret = io_send_zc_import(req, kmsg); ret = io_send_zc_import(req, kmsg);
...@@ -1242,7 +1191,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) ...@@ -1242,7 +1191,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{ {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg; struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock; struct socket *sock;
unsigned flags; unsigned flags;
int ret, min_ret = 0; int ret, min_ret = 0;
...@@ -1255,17 +1204,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) ...@@ -1255,17 +1204,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (req_has_async_data(req)) {
kmsg = req->async_data;
} else {
kmsg = io_msg_alloc_async(req, issue_flags);
if (unlikely(!kmsg))
return -ENOMEM;
ret = io_sendmsg_copy_hdr(req, kmsg);
if (ret)
return ret;
}
if (!(req->flags & REQ_F_POLLED) && if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST)) (sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN; return -EAGAIN;
......
...@@ -34,13 +34,11 @@ struct io_async_connect { ...@@ -34,13 +34,11 @@ struct io_async_connect {
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags); int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req); void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags); int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_send(struct io_kiocb *req, unsigned int issue_flags); int io_send(struct io_kiocb *req, unsigned int issue_flags);
int io_sendrecv_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags); int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
......
...@@ -527,7 +527,6 @@ const struct io_cold_def io_cold_defs[] = { ...@@ -527,7 +527,6 @@ const struct io_cold_def io_cold_defs[] = {
.name = "SENDMSG", .name = "SENDMSG",
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr), .async_size = sizeof(struct io_async_msghdr),
.prep_async = io_sendmsg_prep_async,
.cleanup = io_sendmsg_recvmsg_cleanup, .cleanup = io_sendmsg_recvmsg_cleanup,
.fail = io_sendrecv_fail, .fail = io_sendrecv_fail,
#endif #endif
...@@ -603,7 +602,6 @@ const struct io_cold_def io_cold_defs[] = { ...@@ -603,7 +602,6 @@ const struct io_cold_def io_cold_defs[] = {
.async_size = sizeof(struct io_async_msghdr), .async_size = sizeof(struct io_async_msghdr),
.cleanup = io_sendmsg_recvmsg_cleanup, .cleanup = io_sendmsg_recvmsg_cleanup,
.fail = io_sendrecv_fail, .fail = io_sendrecv_fail,
.prep_async = io_sendrecv_prep_async,
#endif #endif
}, },
[IORING_OP_RECV] = { [IORING_OP_RECV] = {
...@@ -688,7 +686,6 @@ const struct io_cold_def io_cold_defs[] = { ...@@ -688,7 +686,6 @@ const struct io_cold_def io_cold_defs[] = {
.name = "SEND_ZC", .name = "SEND_ZC",
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr), .async_size = sizeof(struct io_async_msghdr),
.prep_async = io_sendrecv_prep_async,
.cleanup = io_send_zc_cleanup, .cleanup = io_send_zc_cleanup,
.fail = io_sendrecv_fail, .fail = io_sendrecv_fail,
#endif #endif
...@@ -697,7 +694,6 @@ const struct io_cold_def io_cold_defs[] = { ...@@ -697,7 +694,6 @@ const struct io_cold_def io_cold_defs[] = {
.name = "SENDMSG_ZC", .name = "SENDMSG_ZC",
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr), .async_size = sizeof(struct io_async_msghdr),
.prep_async = io_sendmsg_prep_async,
.cleanup = io_send_zc_cleanup, .cleanup = io_send_zc_cleanup,
.fail = io_sendrecv_fail, .fail = io_sendrecv_fail,
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment