Commit 4d4ac2ec authored by Daniel Jurgens, committed by Paolo Abeni

virtio_net: Add a lock for per queue RX coalesce

Once the RTNL locking around the control buffer is removed there can be
contention on the per queue RX interrupt coalescing data. Use a mutex
per queue. A mutex is required because virtnet_send_command can sleep.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Heng Qi <hengqi@linux.alibaba.com>
Tested-by: Heng Qi <hengqi@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 650d77c5
...@@ -312,6 +312,9 @@ struct receive_queue { ...@@ -312,6 +312,9 @@ struct receive_queue {
/* Is dynamic interrupt moderation enabled? */ /* Is dynamic interrupt moderation enabled? */
bool dim_enabled; bool dim_enabled;
/* Used to protect dim_enabled and inter_coal */
struct mutex dim_lock;
/* Dynamic Interrupt Moderation */ /* Dynamic Interrupt Moderation */
struct dim dim; struct dim dim;
...@@ -2365,6 +2368,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget) ...@@ -2365,6 +2368,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */ /* Out of packets? */
if (received < budget) { if (received < budget) {
napi_complete = virtqueue_napi_complete(napi, rq->vq, received); napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
/* Intentionally not taking dim_lock here. This may result in a
* spurious net_dim call. But if that happens virtnet_rx_dim_work
* will not act on the scheduled work.
*/
if (napi_complete && rq->dim_enabled) if (napi_complete && rq->dim_enabled)
virtnet_rx_dim_update(vi, rq); virtnet_rx_dim_update(vi, rq);
} }
...@@ -3247,9 +3254,11 @@ static int virtnet_set_ringparam(struct net_device *dev, ...@@ -3247,9 +3254,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err; return err;
/* The reason is same as the transmit virtqueue reset */ /* The reason is same as the transmit virtqueue reset */
mutex_lock(&vi->rq[i].dim_lock);
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
vi->intr_coal_rx.max_usecs, vi->intr_coal_rx.max_usecs,
vi->intr_coal_rx.max_packets); vi->intr_coal_rx.max_packets);
mutex_unlock(&vi->rq[i].dim_lock);
if (err) if (err)
return err; return err;
} }
...@@ -4255,6 +4264,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, ...@@ -4255,6 +4264,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL; struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx; struct scatterlist sgs_rx;
int ret = 0;
int i; int i;
if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
...@@ -4264,16 +4274,22 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, ...@@ -4264,16 +4274,22 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
return -EINVAL; return -EINVAL;
/* Acquire all queues dim_locks */
for (i = 0; i < vi->max_queue_pairs; i++)
mutex_lock(&vi->rq[i].dim_lock);
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true; vi->rx_dim_enabled = true;
for (i = 0; i < vi->max_queue_pairs; i++) for (i = 0; i < vi->max_queue_pairs; i++)
vi->rq[i].dim_enabled = true; vi->rq[i].dim_enabled = true;
return 0; goto unlock;
} }
coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL); coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
if (!coal_rx) if (!coal_rx) {
return -ENOMEM; ret = -ENOMEM;
goto unlock;
}
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false; vi->rx_dim_enabled = false;
...@@ -4291,8 +4307,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, ...@@ -4291,8 +4307,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
&sgs_rx)) &sgs_rx)) {
return -EINVAL; ret = -EINVAL;
goto unlock;
}
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
...@@ -4300,8 +4318,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, ...@@ -4300,8 +4318,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
} }
unlock:
for (i = vi->max_queue_pairs - 1; i >= 0; i--)
mutex_unlock(&vi->rq[i].dim_lock);
return 0; return ret;
} }
static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
...@@ -4325,19 +4346,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, ...@@ -4325,19 +4346,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
u16 queue) u16 queue)
{ {
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
bool cur_rx_dim = vi->rq[queue].dim_enabled;
u32 max_usecs, max_packets; u32 max_usecs, max_packets;
bool cur_rx_dim;
int err; int err;
mutex_lock(&vi->rq[queue].dim_lock);
cur_rx_dim = vi->rq[queue].dim_enabled;
max_usecs = vi->rq[queue].intr_coal.max_usecs; max_usecs = vi->rq[queue].intr_coal.max_usecs;
max_packets = vi->rq[queue].intr_coal.max_packets; max_packets = vi->rq[queue].intr_coal.max_packets;
if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
ec->rx_max_coalesced_frames != max_packets)) ec->rx_max_coalesced_frames != max_packets)) {
mutex_unlock(&vi->rq[queue].dim_lock);
return -EINVAL; return -EINVAL;
}
if (rx_ctrl_dim_on && !cur_rx_dim) { if (rx_ctrl_dim_on && !cur_rx_dim) {
vi->rq[queue].dim_enabled = true; vi->rq[queue].dim_enabled = true;
mutex_unlock(&vi->rq[queue].dim_lock);
return 0; return 0;
} }
...@@ -4350,10 +4376,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, ...@@ -4350,10 +4376,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
ec->rx_coalesce_usecs, ec->rx_coalesce_usecs,
ec->rx_max_coalesced_frames); ec->rx_max_coalesced_frames);
if (err) mutex_unlock(&vi->rq[queue].dim_lock);
return err; return err;
return 0;
} }
static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
...@@ -4390,6 +4414,7 @@ static void virtnet_rx_dim_work(struct work_struct *work) ...@@ -4390,6 +4414,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
qnum = rq - vi->rq; qnum = rq - vi->rq;
mutex_lock(&rq->dim_lock);
if (!rq->dim_enabled) if (!rq->dim_enabled)
goto out; goto out;
...@@ -4405,6 +4430,7 @@ static void virtnet_rx_dim_work(struct work_struct *work) ...@@ -4405,6 +4430,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
dim->state = DIM_START_MEASURE; dim->state = DIM_START_MEASURE;
} }
out: out:
mutex_unlock(&rq->dim_lock);
rtnl_unlock(); rtnl_unlock();
} }
...@@ -4543,11 +4569,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev, ...@@ -4543,11 +4569,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
return -EINVAL; return -EINVAL;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
mutex_lock(&vi->rq[queue].dim_lock);
ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
mutex_unlock(&vi->rq[queue].dim_lock);
} else { } else {
ec->rx_max_coalesced_frames = 1; ec->rx_max_coalesced_frames = 1;
...@@ -5377,6 +5405,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) ...@@ -5377,6 +5405,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
u64_stats_init(&vi->rq[i].stats.syncp); u64_stats_init(&vi->rq[i].stats.syncp);
u64_stats_init(&vi->sq[i].stats.syncp); u64_stats_init(&vi->sq[i].stats.syncp);
mutex_init(&vi->rq[i].dim_lock);
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment