Commit aeb320fc authored by Pavel Begunkov, committed by Paolo Abeni

net: batch zerocopy_fill_skb_from_iter accounting

Instead of accounting every page range against the socket separately, do
it in one batch based on the change in skb->truesize. The accounting is
also moved into __zerocopy_sg_from_iter(), so that
zerocopy_fill_skb_from_iter() stays simpler and is responsible for setting
up frags but not for the accounting.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 7fb05423
net/core/datagram.c

@@ -610,7 +610,7 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
 }
 EXPORT_SYMBOL(skb_copy_datagram_from_iter);
 
-static int zerocopy_fill_skb_from_iter(struct sock *sk, struct sk_buff *skb,
+static int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
 					struct iov_iter *from, size_t length)
 {
 	int frag = skb_shinfo(skb)->nr_frags;
@@ -621,7 +621,6 @@ static int zerocopy_fill_skb_from_iter(struct sock *sk, struct sk_buff *skb,
 		int refs, order, n = 0;
 		size_t start;
 		ssize_t copied;
-		unsigned long truesize;
 
 		if (frag == MAX_SKB_FRAGS)
 			return -EMSGSIZE;
@@ -633,17 +632,9 @@ static int zerocopy_fill_skb_from_iter(struct sock *sk, struct sk_buff *skb,
 
 		length -= copied;
 
-		truesize = PAGE_ALIGN(copied + start);
 		skb->data_len += copied;
 		skb->len += copied;
-		skb->truesize += truesize;
-		if (sk && sk->sk_type == SOCK_STREAM) {
-			sk_wmem_queued_add(sk, truesize);
-			if (!skb_zcopy_pure(skb))
-				sk_mem_charge(sk, truesize);
-		} else {
-			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
-		}
+		skb->truesize += PAGE_ALIGN(copied + start);
 
 		head = compound_head(pages[n]);
 		order = compound_order(head);
@@ -691,10 +682,24 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
 			    struct sk_buff *skb, struct iov_iter *from,
 			    size_t length)
 {
+	unsigned long orig_size = skb->truesize;
+	unsigned long truesize;
+	int ret;
+
 	if (msg && msg->msg_ubuf && msg->sg_from_iter)
 		return msg->sg_from_iter(sk, skb, from, length);
-	else
-		return zerocopy_fill_skb_from_iter(sk, skb, from, length);
+
+	ret = zerocopy_fill_skb_from_iter(skb, from, length);
+
+	truesize = skb->truesize - orig_size;
+	if (sk && sk->sk_type == SOCK_STREAM) {
+		sk_wmem_queued_add(sk, truesize);
+		if (!skb_zcopy_pure(skb))
+			sk_mem_charge(sk, truesize);
+	} else {
+		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
+	}
+	return ret;
 }
 EXPORT_SYMBOL(__zerocopy_sg_from_iter);
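
For reference, below is a minimal standalone sketch (plain userspace C, not kernel code; every identifier in it is hypothetical) of why charging the socket once for the post-call delta in skb->truesize is equivalent to the per-range charges the old loop made: the fill helper still adds PAGE_ALIGN(copied + start) to skb->truesize for each page range, so the delta observed afterwards in __zerocopy_sg_from_iter() is exactly the sum of those per-range amounts.

/* Standalone model, not kernel code: all names below are made up. */
#include <stdio.h>
#include <stddef.h>

#define MODEL_PAGE_SIZE      4096UL
#define MODEL_PAGE_ALIGN(x)  (((x) + MODEL_PAGE_SIZE - 1) & ~(MODEL_PAGE_SIZE - 1))

int main(void)
{
	/* Fake (copied bytes, offset into first page) pairs for three page ranges. */
	struct { unsigned long copied, start; } ranges[] = {
		{ 3000, 100 }, { 8192, 0 }, { 512, 4000 },
	};
	unsigned long truesize = 1000;       /* stands in for skb->truesize */
	unsigned long orig_size = truesize;  /* snapshot taken before the loop */
	unsigned long per_range_total = 0;   /* what the old code charged, range by range */

	for (size_t i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		unsigned long t = MODEL_PAGE_ALIGN(ranges[i].copied + ranges[i].start);

		truesize += t;          /* the fill helper still grows truesize per range */
		per_range_total += t;   /* old behaviour: charge the socket here, every time */
	}

	/* New behaviour: one charge for the whole call, computed from the delta. */
	unsigned long batched = truesize - orig_size;

	printf("per-range charges: %lu, batched charge: %lu\n",
	       per_range_total, batched);
	return 0;
}

The practical effect of the patch is that sk_wmem_queued_add()/sk_mem_charge() (or refcount_add() for non-stream sockets) run once per __zerocopy_sg_from_iter() call instead of once per page range.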