Commit 604ca8ee authored by David S. Miller

Merge branch 'virtio-net-dynamic-coalescing-moderation'

Heng Qi says:

====================
virtio-net: support dynamic coalescing moderation

Now, virtio-net already supports per-queue moderation parameter
setting. Based on this, we use the Linux dimlib to support
dynamic coalescing moderation for virtio-net.

Due to some scheduling issues, only the rx dim is supported and
tested for now.
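
For readers new to dimlib: the driver feeds it a traffic sample at
each napi completion, and dimlib runs a small state machine that
picks a moderation profile and schedules a work item whenever the
profile should change. A minimal sketch of the pattern this series
follows (my_dim_work and the nr_* counters are illustrative, not the
driver's code; the virtnet_* functions in the diff below are the real
implementation):

    struct dim dim;                     /* one per rxq in practice */

    /* once, at queue init */
    INIT_WORK(&dim.work, my_dim_work);
    dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

    /* at each napi completion */
    struct dim_sample sample = {};
    dim_update_sample(nr_irqs, nr_packets, nr_bytes, &sample);
    net_dim(&dim, sample);              /* may schedule dim.work */

    /* in my_dim_work(): apply the chosen profile, then re-arm */
    struct dim_cq_moder m =
            net_dim_get_rx_moderation(dim.mode, dim.profile_ix);
    /* ...program m.usec / m.pkts into the device, then: */
    dim.state = DIM_START_MEASURE;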

Some test results:

I. Sockperf UDP
=================================================
1. Env
rxq_0 has affinity to cpu_0.

2. Cmd
client: taskset -c 0 sockperf tp -p 8989 -i $IP -t 10 -m 16B
server: taskset -c 0 sockperf sr -p 8989

3. Result
dim off: 1143277.00 rxpps, throughput 17.844 MBps, cpu is 100%.
dim on:  1124161.00 rxpps, throughput 17.610 MBps, cpu is 83.5%.
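Normalized for cpu usage, dim on handles roughly 1124161 / 0.835 ~=
1346300 rxpps per fully-used cpu vs 1143277 with dim off: about 18%
better per-cpu efficiency for a ~1.7% drop in absolute throughput.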
=================================================

II. Redis
=================================================
1. Env
There are 8 rxqs, and rxq_i has affinity to cpu_i.

2. Result
When all cpus are 100%, ops/sec of memtier_benchmark client is
dim off:  978437.23
dim on:  1143638.28
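(1143638.28 / 978437.23 ~= 1.17, i.e. a ~17% ops/sec gain at the
same cpu usage.)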
=================================================

III. Nginx
=================================================
1. Env
There are 8 rxqs, and rxq_i has affinity to cpu_i.

2. Result
When all cpus are 100%, requests/sec of wrk client is
dim off:  877931.67
dim on:  1019160.31
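(1019160.31 / 877931.67 ~= 1.16, i.e. a ~16% requests/sec gain at
the same cpu usage.)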
=================================================

IV. Latency of sockperf udp
=================================================
1. Rx cmd
taskset -c 0 sockperf sr -p 8989

2. Tx cmd
taskset -c 0 sockperf pp -i ${ip} -p 8989 -t 10

The results below are the average of 5 runs of this cmd.

3. Result
dim off: 17.7735 usec
dim on:  18.0110 usec
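(That is a 0.2375 usec, or ~1.3%, increase in average round-trip
latency with dim on: presumably the cost of occasionally holding
interrupts back, traded against the cpu savings and throughput
gains above.)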
=================================================

Changelog:
v7->v8:
- Add select DIMLIB.

v6->v7:
- Drop the patch titled "spin lock for ctrl cmd access".
- Use rtnl_trylock to avoid the deadlock (see the note after this changelog).

v5->v6:
- Add patch(4/5): spin lock for ctrl cmd access.
- Patch(5/5):
   - Use spin lock and cancel_work_sync for synchronization.

v4->v5:
- Patch(4/4):
   - Fix possible synchronization issues with cancel_work_sync.
   - Reduce if/else nesting levels.

v3->v4:
- Patch(5/5): dropped.

v2->v3:
- Patch(4/5): some minor modifications.

v1->v2:
- Patch(2/5): a minor fix.
- Patch(4/5):
   - Improve the judgment of the dim switch conditions.
   - Cancel the work when the vq is reset.
- Patch(5/5): drop the tx dim implementation.
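
A note on the v6->v7 rtnl_trylock change above: the dim work handler
must hold rtnl_lock to send ctrl-vq commands, while the close/resize
paths call cancel_work_sync() on that work while already holding
rtnl_lock, so a blocking rtnl_lock() in the worker could deadlock
(each side waiting on the other). A minimal sketch of the pattern
(my_dim_work is illustrative; virtnet_rx_dim_work in the diff is the
real handler):

    static void my_dim_work(struct work_struct *work)
    {
        /* Callers of cancel_work_sync() hold rtnl_lock, so blocking
         * here could deadlock: they wait for this work to finish,
         * this work waits for the lock they hold. Trylock and bail
         * out instead; net_dim() will requeue on a later sample. */
        if (!rtnl_trylock())
            return;
        /* ...send updated coalescing params over the ctrl vq... */
        rtnl_unlock();
    }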
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d2e9464e 62087995
@@ -434,6 +434,7 @@ config VIRTIO_NET
tristate "Virtio network driver"
depends on VIRTIO
select NET_FAILOVER
select DIMLIB
help
This is the virtual network driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
@@ -19,6 +19,7 @@
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
@@ -172,6 +173,17 @@ struct receive_queue {
struct virtnet_rq_stats stats;
/* The number of rx notifications */
u16 calls;
/* Is dynamic interrupt moderation enabled? */
bool dim_enabled;
/* Dynamic Interrupt Moderation */
struct dim dim;
u32 packets_in_napi;
struct virtnet_interrupt_coalesce intr_coal;
/* Chain pages by the private ptr. */
@@ -305,6 +317,9 @@ struct virtnet_info {
u8 duplex;
u32 speed;
/* Is rx dynamic interrupt moderation enabled? */
bool rx_dim_enabled;
/* Interrupt coalescing settings */
struct virtnet_interrupt_coalesce intr_coal_tx;
struct virtnet_interrupt_coalesce intr_coal_rx;
@@ -431,7 +446,7 @@ static void virtqueue_napi_schedule(struct napi_struct *napi,
}
}
-static void virtqueue_napi_complete(struct napi_struct *napi,
+static bool virtqueue_napi_complete(struct napi_struct *napi,
struct virtqueue *vq, int processed)
{
int opaque;
@@ -440,9 +455,13 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
if (napi_complete_done(napi, processed)) {
if (unlikely(virtqueue_poll(vq, opaque)))
virtqueue_napi_schedule(napi, vq);
else
return true;
} else {
virtqueue_disable_cb(vq);
}
return false;
}
static void skb_xmit_done(struct virtqueue *vq)
@@ -1997,6 +2016,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
rq->calls++;
virtqueue_napi_schedule(&rq->napi, rvq);
}
@@ -2137,6 +2157,24 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
}
}
static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
{
struct dim_sample cur_sample = {};
if (!rq->packets_in_napi)
return;
u64_stats_update_begin(&rq->stats.syncp);
dim_update_sample(rq->calls,
u64_stats_read(&rq->stats.packets),
u64_stats_read(&rq->stats.bytes),
&cur_sample);
u64_stats_update_end(&rq->stats.syncp);
net_dim(&rq->dim, cur_sample);
rq->packets_in_napi = 0;
}
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
@@ -2145,17 +2183,22 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
struct send_queue *sq;
unsigned int received;
unsigned int xdp_xmit = 0;
bool napi_complete;
virtnet_poll_cleantx(rq);
received = virtnet_receive(rq, budget, &xdp_xmit);
rq->packets_in_napi += received;
if (xdp_xmit & VIRTIO_XDP_REDIR)
xdp_do_flush();
/* Out of packets? */
-if (received < budget)
-virtqueue_napi_complete(napi, rq->vq, received);
+if (received < budget) {
+napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+if (napi_complete && rq->dim_enabled)
+virtnet_rx_dim_update(vi, rq);
+}
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_get_sq(vi);
@@ -2226,8 +2269,11 @@ static int virtnet_open(struct net_device *dev)
disable_delayed_refill(vi);
cancel_delayed_work_sync(&vi->refill);
-for (i--; i >= 0; i--)
+for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
+cancel_work_sync(&vi->rq[i].dim.work);
+}
return err;
}
@@ -2389,8 +2435,10 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
qindex = rq - vi->rq;
-if (running)
+if (running) {
napi_disable(&rq->napi);
+cancel_work_sync(&rq->dim.work);
+}
err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
if (err)
@@ -2637,8 +2685,10 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
-for (i = 0; i < vi->max_queue_pairs; i++)
+for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
+cancel_work_sync(&vi->rq[i].dim.work);
+}
return 0;
}
@@ -2845,6 +2895,58 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
&vi->node_dead);
}
static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 vqn, u32 max_usecs, u32 max_packets)
{
struct scatterlist sgs;
vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
&sgs))
return -EINVAL;
return 0;
}
static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 queue, u32 max_usecs,
u32 max_packets)
{
int err;
err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
max_usecs, max_packets);
if (err)
return err;
vi->rq[queue].intr_coal.max_usecs = max_usecs;
vi->rq[queue].intr_coal.max_packets = max_packets;
return 0;
}
static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 queue, u32 max_usecs,
u32 max_packets)
{
int err;
err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
max_usecs, max_packets);
if (err)
return err;
vi->sq[queue].intr_coal.max_usecs = max_usecs;
vi->sq[queue].intr_coal.max_packets = max_packets;
return 0;
}
static void virtnet_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2858,9 +2960,6 @@ static void virtnet_get_ringparam(struct net_device *dev,
ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
}
-static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
-u16 vqn, u32 max_usecs, u32 max_packets);
static int virtnet_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2902,14 +3001,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
* through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
* did not set any TX coalescing parameters, to 0.
*/
-err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
-vi->intr_coal_tx.max_usecs,
-vi->intr_coal_tx.max_packets);
+err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
+vi->intr_coal_tx.max_usecs,
+vi->intr_coal_tx.max_packets);
if (err)
return err;
-vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
-vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
}
if (ring->rx_pending != rx_pending) {
@@ -2918,14 +3014,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err;
/* The reason is same as the transmit virtqueue reset */
-err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
-vi->intr_coal_rx.max_usecs,
-vi->intr_coal_rx.max_packets);
+err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
+vi->intr_coal_rx.max_usecs,
+vi->intr_coal_rx.max_packets);
if (err)
return err;
-vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
-vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
}
}
@@ -3262,10 +3355,10 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
-static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
-struct ethtool_coalesce *ec)
+static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
+struct ethtool_coalesce *ec)
{
-struct scatterlist sgs_tx, sgs_rx;
+struct scatterlist sgs_tx;
int i;
vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
@@ -3277,7 +3370,6 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
&sgs_tx))
return -EINVAL;
-/* Save parameters */
vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3285,6 +3377,40 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
}
return 0;
}
static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
int i;
if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
return -EOPNOTSUPP;
if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
return -EINVAL;
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true;
for (i = 0; i < vi->max_queue_pairs; i++)
vi->rq[i].dim_enabled = true;
return 0;
}
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
for (i = 0; i < vi->max_queue_pairs; i++)
vi->rq[i].dim_enabled = false;
}
/* Since the per-queue coalescing params can be set,
* we need to apply the new global params even if they
* are not updated.
*/
vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
@@ -3294,7 +3420,6 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
&sgs_rx))
return -EINVAL;
-/* Save parameters */
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3305,21 +3430,55 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
return 0;
}
-static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
-u16 vqn, u32 max_usecs, u32 max_packets)
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+struct ethtool_coalesce *ec)
{
-struct scatterlist sgs;
+int err;
-vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
-vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
-vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
-sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+err = virtnet_send_tx_notf_coal_cmds(vi, ec);
+if (err)
+return err;
-if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
-VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
-&sgs))
-return -EINVAL;
+err = virtnet_send_rx_notf_coal_cmds(vi, ec);
+if (err)
+return err;
return 0;
}
static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec,
u16 queue)
{
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
bool cur_rx_dim = vi->rq[queue].dim_enabled;
u32 max_usecs, max_packets;
int err;
max_usecs = vi->rq[queue].intr_coal.max_usecs;
max_packets = vi->rq[queue].intr_coal.max_packets;
if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
ec->rx_max_coalesced_frames != max_packets))
return -EINVAL;
if (rx_ctrl_dim_on && !cur_rx_dim) {
vi->rq[queue].dim_enabled = true;
return 0;
}
if (!rx_ctrl_dim_on && cur_rx_dim)
vi->rq[queue].dim_enabled = false;
/* If no params are updated, userspace ethtool will
* reject the modification.
*/
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
ec->rx_coalesce_usecs,
ec->rx_max_coalesced_frames);
if (err)
return err;
return 0;
}
@@ -3329,27 +3488,62 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
{
int err;
-err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
-ec->rx_coalesce_usecs,
-ec->rx_max_coalesced_frames);
+err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
if (err)
return err;
-vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
-vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
-ec->tx_coalesce_usecs,
-ec->tx_max_coalesced_frames);
+err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
+ec->tx_coalesce_usecs,
+ec->tx_max_coalesced_frames);
if (err)
return err;
-vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
-vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
return 0;
}
static void virtnet_rx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct receive_queue *rq = container_of(dim,
struct receive_queue, dim);
struct virtnet_info *vi = rq->vq->vdev->priv;
struct net_device *dev = vi->dev;
struct dim_cq_moder update_moder;
int i, qnum, err;
if (!rtnl_trylock())
return;
/* Each rxq's work is queued by "net_dim()->schedule_work()"
* in response to NAPI traffic changes. Note that dim->profile_ix
* for each rxq is updated prior to the queuing action.
* So we only need to traverse and update profiles for all rxqs
* in this work, which holds rtnl_lock.
*/
for (i = 0; i < vi->curr_queue_pairs; i++) {
rq = &vi->rq[i];
dim = &rq->dim;
qnum = rq - vi->rq;
if (!rq->dim_enabled)
continue;
update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
if (update_moder.usec != rq->intr_coal.max_usecs ||
update_moder.pkts != rq->intr_coal.max_packets) {
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
update_moder.usec,
update_moder.pkts);
if (err)
pr_debug("%s: Failed to send dim parameters on rxq%d\n",
dev->name, qnum);
dim->state = DIM_START_MEASURE;
}
}
rtnl_unlock();
}
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
{
/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
@@ -3431,6 +3625,7 @@ static int virtnet_get_coalesce(struct net_device *dev,
ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
} else {
ec->rx_max_coalesced_frames = 1;
@@ -3488,6 +3683,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
} else {
ec->rx_max_coalesced_frames = 1;
@@ -3613,7 +3809,7 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
static const struct ethtool_ops virtnet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
-ETHTOOL_COALESCE_USECS,
+ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
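
With ETHTOOL_COALESCE_USE_ADAPTIVE_RX advertised, rx dim becomes
controllable from userspace ethtool, e.g. (device name illustrative;
per-queue syntax as documented in ethtool(8)):

ethtool -C eth0 adaptive-rx on
ethtool -c eth0
ethtool --per-queue eth0 queue_mask 0x1 --coalesce adaptive-rx on

Note that virtnet_send_rx_notf_coal_cmds() above returns -EINVAL if a
single command both enables dim and changes rx-usecs/rx-frames.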
@@ -4203,6 +4399,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));