Commit f5a4941a authored by Jason Wang, committed by David S. Miller

vhost_net: flush batched heads before trying to busy polling

After commit e2b3b35e ("vhost_net: batch used ring update in rx"),
used heads are batched before being added to the used ring. But the
batched heads are not flushed before trying to busy poll, so vhost can
end up waiting for guest TX while the guest is itself waiting for the
RX used ring to be updated. Fix this by flushing the batched heads
before entering the busy loop.

1 byte TCP_RR performance recovers from 13107.83 to 50402.65.
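
The pattern is easiest to see outside the kernel. Below is a minimal
userspace sketch (not the kernel code; struct rx_queue, rx_complete(),
rx_signal_used() and BATCH are invented stand-ins for the
vhost_net_virtqueue fields and helpers touched by this patch):
completions are batched in done_idx and must be published before any
busy wait, otherwise the peer never sees its buffers returned and
stops producing.

#include <stdio.h>

#define BATCH 64

struct rx_queue {
	unsigned int heads[BATCH + 1]; /* batched completed heads */
	int done_idx;                  /* number of batched heads */
	int published;                 /* heads already visible to the peer */
};

/* Publish all batched heads to the peer (roughly analogous to
 * calling vhost_add_used_and_signal_n() and resetting done_idx). */
static void rx_signal_used(struct rx_queue *q)
{
	if (!q->done_idx)
		return;
	q->published += q->done_idx;
	q->done_idx = 0;
}

/* Record one completed head; flush once the batch overflows,
 * mirroring the "> VHOST_RX_BATCH" check in handle_rx(). */
static void rx_complete(struct rx_queue *q, unsigned int head)
{
	q->heads[q->done_idx++] = head;
	if (q->done_idx > BATCH)
		rx_signal_used(q);
}

int main(void)
{
	struct rx_queue q = { .done_idx = 0, .published = 0 };
	unsigned int head;

	for (head = 0; head < 10; head++)
		rx_complete(&q, head);

	/* Without this flush the 10 completions above would still be
	 * invisible to the peer while we busy-wait for more work,
	 * which is exactly the stall the patch fixes. */
	rx_signal_used(&q);

	printf("published %d heads, %d still batched\n",
	       q.published, q.done_idx);
	return 0;
}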

Fixes: e2b3b35e ("vhost_net: batch used ring update in rx")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6547e387
@@ -105,7 +105,9 @@ struct vhost_net_virtqueue {
 	/* vhost zerocopy support fields below: */
 	/* last used idx for outstanding DMA zerocopy buffers */
 	int upend_idx;
-	/* first used idx for DMA done zerocopy buffers */
+	/* For TX, first used idx for DMA done zerocopy buffers
+	 * For RX, number of batched heads
+	 */
 	int done_idx;
 	/* an array of userspace buffers info */
 	struct ubuf_info *ubuf_info;
@@ -626,6 +628,18 @@ static int sk_has_rx_data(struct sock *sk)
 	return skb_queue_empty(&sk->sk_receive_queue);
 }
 
+static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+{
+	struct vhost_virtqueue *vq = &nvq->vq;
+	struct vhost_dev *dev = vq->dev;
+
+	if (!nvq->done_idx)
+		return;
+
+	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+	nvq->done_idx = 0;
+}
+
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 {
 	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
@@ -635,6 +649,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 	int len = peek_head_len(rvq, sk);
 
 	if (!len && vq->busyloop_timeout) {
+		/* Flush batched heads first */
+		vhost_rx_signal_used(rvq);
 		/* Both tx vq and rx socket were polled here */
 		mutex_lock_nested(&vq->mutex, 1);
 		vhost_disable_notify(&net->dev, vq);
@@ -762,7 +778,7 @@ static void handle_rx(struct vhost_net *net)
 	};
 	size_t total_len = 0;
 	int err, mergeable;
-	s16 headcount, nheads = 0;
+	s16 headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
 	struct socket *sock;
@@ -790,8 +806,8 @@ static void handle_rx(struct vhost_net *net)
 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
-		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
-					&in, vq_log, &log,
+		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+					vhost_len, &in, vq_log, &log,
 					likely(mergeable) ? UIO_MAXIOV : 1);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
@@ -862,12 +878,9 @@ static void handle_rx(struct vhost_net *net)
 			vhost_discard_vq_desc(vq, headcount);
 			goto out;
 		}
-		nheads += headcount;
-		if (nheads > VHOST_RX_BATCH) {
-			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-						    nheads);
-			nheads = 0;
-		}
+		nvq->done_idx += headcount;
+		if (nvq->done_idx > VHOST_RX_BATCH)
+			vhost_rx_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -878,9 +891,7 @@ static void handle_rx(struct vhost_net *net)
 	}
 	vhost_net_enable_vq(net, vq);
 out:
-	if (nheads)
-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-					    nheads);
+	vhost_rx_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
 }