Commit f1a456f8 authored by Talal Ahmad, committed by Jakub Kicinski

net: avoid double accounting for pure zerocopy skbs

Track skbs that carry only zerocopy data and avoid charging them to
kernel memory, so that memory utilization for msg_zerocopy is accounted
correctly. All of the data in such skbs is held in user pages, which are
already accounted to the user. Before this change, that data was charged
a second time, to kernel memory, in __zerocopy_sg_from_iter. The kernel
charge is excessive because the data is never copied into the skb frags,
and it can push the kernel into a memory pressure state that adversely
impacts every socket in the system. Mark pure zerocopy skbs with a new
SKBFL_PURE_ZEROCOPY flag and remove the charge/uncharge for data in
such skbs.
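Condensed, the new charging rule looks like this (a simplified sketch
of the policy, not the exact kernel code; the real change is in the
__zerocopy_sg_from_iter hunk below):

	/* wmem queue accounting is unchanged, but forward alloc is only
	 * charged when the skb holds copied data; pure zerocopy data
	 * lives in user pages the user is already charged for.
	 */
	skb->truesize += truesize;
	sk_wmem_queued_add(sk, truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_charge(sk, truesize);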

Initially, an skb is marked pure zerocopy when it is empty and on the
zerocopy path. The skb can then change from pure zerocopy to mixed data
(zerocopy and copied data) if it sits at the tail of the write queue
with room available and the next sendmsg call carries non-zerocopy
data. At that point, sk_mem_charge is done for the zerocopied data
already in the skb and the pure zerocopy flag is cleared. We found that
this transition happens very rarely on workloads that pass MSG_ZEROCOPY.
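In code, the pure-to-mixed transition reduces to the following
(condensed from the tcp_sendmsg_locked hunk below):

	/* skb changing from pure zc to mixed, must charge zc */
	if (unlikely(skb_zcopy_pure(skb))) {
		if (!sk_wmem_schedule(sk, skb->data_len))
			goto wait_for_space;
		/* charge the zerocopy data that was uncharged until now */
		sk_mem_charge(sk, skb->data_len);
		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
	}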

A pure zerocopy skb could otherwise be coalesced into a normal skb
adjacent to it in the queue, but this patch prevents that coalescing.
This avoids the complexity of charging when an skb downgrades from pure
zerocopy to mixed, and such coalescing is also rare.
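The guard is the new skb_pure_zcopy_same() helper: two skbs may only be
collapsed if both are pure zerocopy or both are not (wired into
tcp_skb_can_collapse below):

	static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
					       const struct sk_buff *skb2)
	{
		return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
	}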

In tcp_wmem_free_skb, a pure zerocopy skb is uncharged only by
SKB_TRUESIZE(MAX_TCP_HEADER), matching the sk_mem_charge done in
tcp_skb_entail for an skb without data.
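The forward-alloc charges on a pure zerocopy skb therefore balance to
zero over its lifetime (an illustrative ledger, assuming the skb never
transitions to mixed):

	tcp_skb_entail:      +SKB_TRUESIZE(MAX_TCP_HEADER)  (header-only skb)
	zerocopy appends:    +0  (user pages, never charged)
	tcp_wmem_free_skb:   -SKB_TRUESIZE(MAX_TCP_HEADER)
	net charge:           0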

Testing with the msg_zerocopy.c benchmark between two hosts (100G NICs)
with zerocopy showed that before this patch the 'sock' variable in
cgroup2's memory.stat, which tracks the sum of sk_forward_alloc,
sk_rmem_alloc and sk_wmem_queued, was around 1822720; with this change
it is 0. No charge is made to sk_forward_alloc for zerocopy data, which
shows that kernel memory utilization is lowered.
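For reference, the benchmark exercises the standard MSG_ZEROCOPY flow;
a minimal userspace sketch of that flow (error handling and completion
parsing trimmed, fd assumed to be a connected TCP socket):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/errqueue.h>

	static void send_zerocopy(int fd, const char *buf, size_t len)
	{
		int one = 1;
		char control[128];
		struct msghdr msg;

		/* opt in once per socket, then request zerocopy per call */
		setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
		send(fd, buf, len, MSG_ZEROCOPY);

		/* completions arrive as SO_EE_ORIGIN_ZEROCOPY notifications
		 * on the socket error queue; the buffer must stay untouched
		 * until the matching notification has been read.
		 */
		memset(&msg, 0, sizeof(msg));
		msg.msg_control = control;
		msg.msg_controllen = sizeof(control);
		recvmsg(fd, &msg, MSG_ERRQUEUE);
	}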
Signed-off-by: Talal Ahmad <talalahmad@google.com>
Acked-by: Arjun Roy <arjunroy@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 03271f3a
include/linux/skbuff.h

@@ -454,9 +454,15 @@ enum {
 	 * all frags to avoid possible bad checksum
 	 */
 	SKBFL_SHARED_FRAG = BIT(1),
+
+	/* segment contains only zerocopy data and should not be
+	 * charged to the kernel memory.
+	 */
+	SKBFL_PURE_ZEROCOPY = BIT(2),
 };
 
 #define SKBFL_ZEROCOPY_FRAG	(SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY	(SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY)
 
 /*
  * The callback notifies userspace to release buffers when skb DMA is done in
@@ -1464,6 +1470,17 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
 	return is_zcopy ? skb_uarg(skb) : NULL;
 }
 
+static inline bool skb_zcopy_pure(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
+}
+
+static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
+				       const struct sk_buff *skb2)
+{
+	return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
+}
+
 static inline void net_zcopy_get(struct ubuf_info *uarg)
 {
 	refcount_inc(&uarg->refcnt);
@@ -1528,7 +1545,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
 		if (!skb_zcopy_is_nouarg(skb))
 			uarg->callback(skb, uarg, zerocopy_success);
 
-		skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
+		skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
 	}
 }
include/net/tcp.h

@@ -293,7 +293,10 @@ static inline bool tcp_out_of_memory(struct sock *sk)
 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
 	sk_wmem_queued_add(sk, -skb->truesize);
-	sk_mem_uncharge(sk, skb->truesize);
+	if (!skb_zcopy_pure(skb))
+		sk_mem_uncharge(sk, skb->truesize);
+	else
+		sk_mem_uncharge(sk, SKB_TRUESIZE(MAX_TCP_HEADER));
 	__kfree_skb(skb);
 }
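For context, SKB_TRUESIZE() adds the aligned skb metadata overhead to a
buffer size; in include/linux/skbuff.h it is defined as:

	#define SKB_TRUESIZE(X) ((X) +					\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

so the pure zerocopy branch above uncharges exactly the header-only
amount charged when the skb was entailed.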
@@ -974,7 +977,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
 					const struct sk_buff *from)
 {
 	return likely(tcp_skb_can_collapse_to(to) &&
-		      mptcp_skb_can_collapse(to, from));
+		      mptcp_skb_can_collapse(to, from) &&
+		      skb_pure_zcopy_same(to, from));
 }
 
 /* Events passed to congestion control interface */
net/core/datagram.c

@@ -646,7 +646,8 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 	skb->truesize += truesize;
 	if (sk && sk->sk_type == SOCK_STREAM) {
 		sk_wmem_queued_add(sk, truesize);
-		sk_mem_charge(sk, truesize);
+		if (!skb_zcopy_pure(skb))
+			sk_mem_charge(sk, truesize);
 	} else {
 		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
 	}
net/core/skbuff.c

@@ -3433,8 +3433,9 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
 {
 	int pos = skb_headlen(skb);
+	const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
 
-	skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
+	skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
 	skb_zerocopy_clone(skb1, skb, 0);
 	if (len < pos)	/* Split line is inside header. */
 		skb_split_inside_header(skb, skb1, len, pos);
net/ipv4/tcp.c

@@ -863,6 +863,7 @@ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 	if (likely(skb)) {
 		bool mem_scheduled;
 
+		skb->truesize = SKB_TRUESIZE(size + MAX_TCP_HEADER);
 		if (force_schedule) {
 			mem_scheduled = true;
 			sk_forced_mem_schedule(sk, skb->truesize);
@@ -1319,6 +1320,15 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
+			/* skb changing from pure zc to mixed, must charge zc */
+			if (unlikely(skb_zcopy_pure(skb))) {
+				if (!sk_wmem_schedule(sk, skb->data_len))
+					goto wait_for_space;
+
+				sk_mem_charge(sk, skb->data_len);
+				skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+			}
+
 			if (!sk_wmem_schedule(sk, copy))
 				goto wait_for_space;
@@ -1339,8 +1349,16 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			}
 			pfrag->offset += copy;
 		} else {
-			if (!sk_wmem_schedule(sk, copy))
-				goto wait_for_space;
+			/* First append to a fragless skb builds initial
+			 * pure zerocopy skb
+			 */
+			if (!skb->len)
+				skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
+
+			if (!skb_zcopy_pure(skb)) {
+				if (!sk_wmem_schedule(sk, copy))
+					goto wait_for_space;
+			}
 
 			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
 			if (err == -EMSGSIZE || err == -EEXIST) {
net/ipv4/tcp_output.c

@@ -1677,7 +1677,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	if (delta_truesize) {
 		skb->truesize -= delta_truesize;
 		sk_wmem_queued_add(sk, -delta_truesize);
-		sk_mem_uncharge(sk, delta_truesize);
+		if (!skb_zcopy_pure(skb))
+			sk_mem_uncharge(sk, delta_truesize);
 	}
 
 	/* Any change of skb->len requires recalculation of tso factor. */
@@ -2295,7 +2296,9 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
 		if (len <= skb->len)
 			break;
 
-		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
+		if (unlikely(TCP_SKB_CB(skb)->eor) ||
+		    tcp_has_tx_tstamp(skb) ||
+		    !skb_pure_zcopy_same(skb, next))
 			return false;
 
 		len -= skb->len;