Commit 81cf1ade authored by David S. Miller

Merge branch 'tcp-io_uring-zc-opts'

Pavel Begunkov says:

====================
minor tcp io_uring zc optimisations

Patch 1 is a simple cleanup, patch 2 removes 2 atomics from the
io_uring zc TCP submission path, which yielded an extra 0.5% in my
CPU-bound throughput tests based on liburing/examples/send-zerocopy.c
====================
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 833e24ae a7533584
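
For context on the claim above: the two atomics are the ubuf_info refcount
get/put that the TCP path no longer takes when io_uring passes a
caller-pinned msg->msg_ubuf. A minimal sketch of the pattern, assuming a
simplified model in which struct ubuf, ubuf_get() and ubuf_put() are
hypothetical stand-ins for the kernel's ubuf_info and
net_zcopy_get()/net_zcopy_put():

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for the kernel's struct ubuf_info. */
struct ubuf {
	atomic_int refcnt;
};

static void ubuf_get(struct ubuf *u) { atomic_fetch_add(&u->refcnt, 1); }
static void ubuf_put(struct ubuf *u) { atomic_fetch_sub(&u->refcnt, 1); }

/*
 * Old pattern: the send path pins the buffer itself, costing one atomic
 * inc on entry and one atomic dec on exit (the "2 atomics" from the
 * cover letter) on every sendmsg() call.
 */
static void send_old(struct ubuf *u)
{
	ubuf_get(u);	/* atomic #1 */
	/* ... build and queue skbs that reference the buffer ... */
	ubuf_put(u);	/* atomic #2 */
}

/*
 * New pattern: when the caller already holds a reference for the whole
 * call (io_uring pins msg->msg_ubuf across the submission), the get/put
 * pair is skipped; only the SOCK_ZEROCOPY fallback still refcounts.
 */
static void send_new(struct ubuf *u, bool caller_pinned)
{
	if (!caller_pinned)
		ubuf_get(u);
	/* ... build and queue skbs that reference the buffer ... */
	if (!caller_pinned)
		ubuf_put(u);
}
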
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1229,13 +1229,11 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 	flags = msg->msg_flags;
 	if ((flags & MSG_ZEROCOPY) && size) {
+		skb = tcp_write_queue_tail(sk);
 		if (msg->msg_ubuf) {
 			uarg = msg->msg_ubuf;
-			net_zcopy_get(uarg);
 			zc = sk->sk_route_caps & NETIF_F_SG;
 		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
-			skb = tcp_write_queue_tail(sk);
 			uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
 			if (!uarg) {
 				err = -ENOBUFS;
@@ -1459,6 +1457,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
 	}
 out_nopush:
-	net_zcopy_put(uarg);
+	/* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
+	if (uarg && !msg->msg_ubuf)
+		net_zcopy_put(uarg);
 	return copied + copied_syn;
@@ -1468,6 +1468,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 	if (copied + copied_syn)
 		goto out;
 out_err:
-	net_zcopy_put_abort(uarg, true);
+	/* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
+	if (uarg && !msg->msg_ubuf)
+		net_zcopy_put_abort(uarg, true);
 	err = sk_stream_error(sk, flags, err);
 	/* make sure we wake any epoll edge trigger waiter */
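
The !msg->msg_ubuf guards above are what make dropping net_zcopy_get() safe:
once the submission path stops taking its own reference, the only reference
on a caller-supplied msg_ubuf belongs to io_uring, so neither the completion
path (net_zcopy_put()) nor the error path (net_zcopy_put_abort()) may
release it.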