Commit 625e4b59 authored by Dragos Tatulea, committed by Michael S. Tsirkin

vdpa/mlx5: Improve mr update flow

The current flow for updating an mr works directly on mvdev->mr, which
makes it cumbersome to handle multiple new mr structs.

This patch makes the flow more straightforward by having
mlx5_vdpa_create_mr return a new mr that replaces the old mr (if
any): the old mr is destroyed and unlinked from mvdev. For the case
when the iotlb is empty (but not NULL), no new mr is created and the
old mr is simply cleared (see the sketch below).

This change paves the way for adding mrs for different ASIDs.

The initialized bool is no longer needed, as mr is now a pointer in the
mlx5_vdpa_dev struct that will be NULL when not initialized.
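
For reference, a minimal sketch of the calling convention this patch
introduces, distilled from the set_map_data() hunk below. The helper name
set_map_sketch() is hypothetical and error handling is trimmed; this is
not the literal driver code:

	static int set_map_sketch(struct mlx5_vdpa_dev *mvdev,
				  struct vhost_iotlb *iotlb, unsigned int asid)
	{
		struct mlx5_vdpa_mr *new_mr = NULL;

		if (vhost_iotlb_itree_first(iotlb, 0, U64_MAX)) {
			/* Non-empty iotlb: build a new mr, not yet linked to mvdev. */
			new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
			if (IS_ERR(new_mr))
				return PTR_ERR(new_mr);
		}

		/*
		 * A NULL new_mr (empty iotlb) just clears the old mr; otherwise
		 * the old mr is destroyed, unlinked and freed under mr_mtx.
		 */
		mlx5_vdpa_update_mr(mvdev, new_mr, asid);

		return 0;
	}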
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Message-Id: <20231018171456.1624030-14-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Si-Wei Liu <si-wei.liu@oracle.com>
Tested-by: Si-Wei Liu <si-wei.liu@oracle.com>
Tested-by: Lei Yang <leiyang@redhat.com>
parent 186e2538
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -31,8 +31,6 @@ struct mlx5_vdpa_mr {
 	struct list_head head;
 	unsigned long num_directs;
 	unsigned long num_klms;
-	/* state of dvq mr */
-	bool initialized;
 
 	bool user_mr;
 };
@@ -91,7 +89,7 @@ struct mlx5_vdpa_dev {
 	u16 max_idx;
 	u32 generation;
 
-	struct mlx5_vdpa_mr mr;
+	struct mlx5_vdpa_mr *mr;
 	/* serialize mr access */
 	struct mutex mr_mtx;
 	struct mlx5_control_vq cvq;
@@ -114,14 +112,14 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
 int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
 			  int inlen);
 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map, unsigned int asid);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
-			struct mlx5_vdpa_mr *mr,
-			struct vhost_iotlb *iotlb);
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+					 struct vhost_iotlb *iotlb);
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
 			  struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+			 struct mlx5_vdpa_mr *mr,
+			 unsigned int asid);
 int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
 			       struct vhost_iotlb *iotlb,
 			       unsigned int asid);
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -495,30 +495,51 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
 
 static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
 {
-	if (!mr->initialized)
-		return;
-
 	if (mr->user_mr)
 		destroy_user_mr(mvdev, mr);
 	else
 		destroy_dma_mr(mvdev, mr);
-
-	mr->initialized = false;
 }
 
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
 			  struct mlx5_vdpa_mr *mr)
 {
+	if (!mr)
+		return;
+
 	mutex_lock(&mvdev->mr_mtx);
+
 	_mlx5_vdpa_destroy_mr(mvdev, mr);
+
+	if (mvdev->mr == mr)
+		mvdev->mr = NULL;
+
 	mutex_unlock(&mvdev->mr_mtx);
+
+	kfree(mr);
+}
+
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+			 struct mlx5_vdpa_mr *new_mr,
+			 unsigned int asid)
+{
+	struct mlx5_vdpa_mr *old_mr = mvdev->mr;
+
+	mutex_lock(&mvdev->mr_mtx);
+
+	mvdev->mr = new_mr;
+	if (old_mr) {
+		_mlx5_vdpa_destroy_mr(mvdev, old_mr);
+		kfree(old_mr);
+	}
+
+	mutex_unlock(&mvdev->mr_mtx);
 }
 
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
-	mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
+	mlx5_vdpa_destroy_mr(mvdev, mvdev->mr);
 	prune_iotlb(mvdev);
 }
@@ -528,52 +549,36 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 {
 	int err;
 
-	if (mr->initialized)
-		return 0;
-
 	if (iotlb)
 		err = create_user_mr(mvdev, mr, iotlb);
 	else
 		err = create_dma_mr(mvdev, mr);
 
-	if (err)
-		return err;
-
-	mr->initialized = true;
-
-	return 0;
+	return err;
 }
 
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
-			struct mlx5_vdpa_mr *mr,
-			struct vhost_iotlb *iotlb)
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+					 struct vhost_iotlb *iotlb)
 {
+	struct mlx5_vdpa_mr *mr;
 	int err;
 
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
 	mutex_lock(&mvdev->mr_mtx);
 	err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
 	mutex_unlock(&mvdev->mr_mtx);
 
-	return err;
-}
-
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-			     bool *change_map, unsigned int asid)
-{
-	struct mlx5_vdpa_mr *mr = &mvdev->mr;
-	int err = 0;
+	if (err)
+		goto out_err;
 
-	*change_map = false;
-	mutex_lock(&mvdev->mr_mtx);
-	if (mr->initialized) {
-		mlx5_vdpa_info(mvdev, "memory map update\n");
-		*change_map = true;
-	}
-	if (!*change_map)
-		err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
-	mutex_unlock(&mvdev->mr_mtx);
+	return mr;
 
-	return err;
+out_err:
+	kfree(mr);
+	return ERR_PTR(err);
 }
 
 int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
@@ -597,11 +602,13 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
 
 int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
 {
-	int err;
+	struct mlx5_vdpa_mr *mr;
 
-	err = mlx5_vdpa_create_mr(mvdev, &mvdev->mr, NULL);
-	if (err)
-		return err;
+	mr = mlx5_vdpa_create_mr(mvdev, NULL);
+	if (IS_ERR(mr))
+		return PTR_ERR(mr);
+
+	mlx5_vdpa_update_mr(mvdev, mr, 0);
 
 	return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
 }
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -913,7 +913,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
 	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
 	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
-	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey);
+	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr->mkey);
 	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
 	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
 	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
@@ -2673,7 +2673,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
 }
 
 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
-				struct vhost_iotlb *iotlb, unsigned int asid)
+				struct mlx5_vdpa_mr *new_mr, unsigned int asid)
 {
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
@@ -2681,27 +2681,18 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
 
 	suspend_vqs(ndev);
 	err = save_channels_info(ndev);
 	if (err)
-		goto err_mr;
+		return err;
 
 	teardown_driver(ndev);
-	mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
-	err = mlx5_vdpa_create_mr(mvdev, &mvdev->mr, iotlb);
-	if (err)
-		goto err_mr;
+	mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 
 	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
-		goto err_mr;
+		return 0;
 
 	restore_channels_info(ndev);
 	err = setup_driver(mvdev);
 	if (err)
-		goto err_setup;
+		return err;
 
 	return 0;
-
-err_setup:
-	mlx5_vdpa_destroy_mr(mvdev, &mvdev->mr);
-err_mr:
-	return err;
 }
@@ -2919,26 +2910,40 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
 			unsigned int asid)
 {
-	bool change_map;
+	struct mlx5_vdpa_mr *new_mr;
 	int err;
 
 	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
 		goto end;
 
-	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
-	if (err) {
-		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-		return err;
+	if (vhost_iotlb_itree_first(iotlb, 0, U64_MAX)) {
+		new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
+		if (IS_ERR(new_mr)) {
+			err = PTR_ERR(new_mr);
+			mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+			return err;
+		}
+	} else {
+		/* Empty iotlbs don't have an mr but will clear the previous mr. */
+		new_mr = NULL;
 	}
 
-	if (change_map) {
-		err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
-		if (err)
-			return err;
+	if (!mvdev->mr) {
+		mlx5_vdpa_update_mr(mvdev, new_mr, asid);
+	} else {
+		err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
+		if (err) {
+			mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+			goto out_err;
+		}
 	}
 
 end:
 	return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
+
+out_err:
+	mlx5_vdpa_destroy_mr(mvdev, new_mr);
+
+	return err;
 }
 
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,