Commit ce21a029 authored by Jason Wang, committed by David S. Miller

vhost_net: determine whether or not to use zerocopy at one time

Currently, even if the packet length is smaller than VHOST_GOODCOPY_LEN, we still set zcopy_used to true when upend_idx != done_idx, and roll back that choice later. This can be avoided by checking all of the conditions up front and deciding whether to use zerocopy only once.
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c49e4e57
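
To make the condition change easier to follow, here is a minimal user-space sketch of the old and new decisions. struct tx_state and the would_use_zerocopy_* helpers are illustrative stand-ins rather than the driver's own code; only the constants and the boolean expressions mirror the patch below.

/*
 * Hypothetical user-space model of the decision change; not the driver code.
 * The struct and helper names are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define UIO_MAXIOV         1024
#define VHOST_GOODCOPY_LEN 256

struct tx_state {
	int  upend_idx;	/* next slot for an in-flight zerocopy buffer */
	int  done_idx;	/* oldest slot whose DMA has not yet completed */
	bool zcopy;	/* zerocopy enabled for this virtqueue */
};

/* Old logic: commit to zerocopy early and possibly roll back to copy later. */
static bool would_use_zerocopy_old(const struct tx_state *s, size_t len)
{
	return s->zcopy && (len >= VHOST_GOODCOPY_LEN ||
			    s->upend_idx != s->done_idx);
}

/* New logic: every condition is checked once, so no rollback is needed. */
static bool would_use_zerocopy_new(const struct tx_state *s, size_t len,
				   bool tx_select_zcopy)
{
	return s->zcopy && len >= VHOST_GOODCOPY_LEN
			&& (s->upend_idx + 1) % UIO_MAXIOV != s->done_idx
			&& tx_select_zcopy;
}

int main(void)
{
	/* Short packet while earlier zerocopy buffers are still in flight. */
	struct tx_state s = { .upend_idx = 5, .done_idx = 2, .zcopy = true };
	size_t len = 64;	/* smaller than VHOST_GOODCOPY_LEN */

	printf("old: %d, new: %d\n",
	       would_use_zerocopy_old(&s, len),
	       would_use_zerocopy_new(&s, len, true));
	/* old: 1 (zerocopy chosen, then rolled back), new: 0 (copy path) */
	return 0;
}

With the old logic, the short packet above is first marked zcopy_used and then demoted to the copy path inside the if block; with the new logic, the copy path is chosen directly and the error path can call vhost_net_ubuf_put() unconditionally whenever zcopy_used is set.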
...
@@ -404,43 +404,36 @@ static void handle_tx(struct vhost_net *net)
 			       iov_length(nvq->hdr, s), hdr_size);
 			break;
 		}
-		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
-				       nvq->upend_idx != nvq->done_idx);
+
+		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
+				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
+				      nvq->done_idx
+				   && vhost_net_tx_select_zcopy(net);
 
 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
 		if (zcopy_used) {
+			struct ubuf_info *ubuf;
+			ubuf = nvq->ubuf_info + nvq->upend_idx;
+
 			vq->heads[nvq->upend_idx].id = head;
-			if (!vhost_net_tx_select_zcopy(net) ||
-			    len < VHOST_GOODCOPY_LEN) {
-				/* copy don't need to wait for DMA done */
-				vq->heads[nvq->upend_idx].len =
-							VHOST_DMA_DONE_LEN;
-				msg.msg_control = NULL;
-				msg.msg_controllen = 0;
-				ubufs = NULL;
-			} else {
-				struct ubuf_info *ubuf;
-				ubuf = nvq->ubuf_info + nvq->upend_idx;
-
-				vq->heads[nvq->upend_idx].len =
-					VHOST_DMA_IN_PROGRESS;
-				ubuf->callback = vhost_zerocopy_callback;
-				ubuf->ctx = nvq->ubufs;
-				ubuf->desc = nvq->upend_idx;
-				msg.msg_control = ubuf;
-				msg.msg_controllen = sizeof(ubuf);
-				ubufs = nvq->ubufs;
-				kref_get(&ubufs->kref);
-			}
+			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
+			ubuf->callback = vhost_zerocopy_callback;
+			ubuf->ctx = nvq->ubufs;
+			ubuf->desc = nvq->upend_idx;
+			msg.msg_control = ubuf;
+			msg.msg_controllen = sizeof(ubuf);
+			ubufs = nvq->ubufs;
+			kref_get(&ubufs->kref);
 			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
-		} else
+		} else {
 			msg.msg_control = NULL;
+			ubufs = NULL;
+		}
 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
 		err = sock->ops->sendmsg(NULL, sock, &msg, len);
 		if (unlikely(err < 0)) {
 			if (zcopy_used) {
-				if (ubufs)
-					vhost_net_ubuf_put(ubufs);
+				vhost_net_ubuf_put(ubufs);
 				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
 					% UIO_MAXIOV;
 			}
...