Commit 6f5312f8 authored by Eli Cohen, committed by Michael S. Tsirkin

vdpa/mlx5: Add support for running with virtio_vdpa

In order to support running vdpa with the virtio_vdpa driver, we need to
create a different kind of MR, one with a 1:1 mapping, since the
addresses referring to virtqueues are DMA addresses.
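
For illustration only, not part of this patch: with virtio_vdpa there is
no vhost iotlb. The rings are allocated through the kernel DMA API and
the mapped addresses are handed straight to the device, roughly:

	/* Sketch: ring allocation on the virtio_vdpa path. The names
	 * dma_dev, ring_size, idx, driver_dma and device_dma are
	 * placeholders. The point is that desc_dma is a DMA address,
	 * so the device needs an identity (1:1) MR to use it.
	 */
	dma_addr_t desc_dma;
	void *desc = dma_alloc_coherent(dma_dev, ring_size, &desc_dma, GFP_KERNEL);

	if (!desc)
		return -ENOMEM;
	err = ops->set_vq_address(vdpa, idx, desc_dma, driver_dma, device_dma);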

We create the 1:1 MR in mlx5_vdpa_dev_add() only if the firmware
supports the general capability umem_uid_0. The reason is that 1:1 MRs
must be created with uid == 0, while virtqueue objects can be created
with uid == 0 only when that firmware capability is set.
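
The gating is the same check that appears in the hunks below; passing a
NULL iotlb to mlx5_vdpa_create_mr() is what selects the 1:1 MR path:

	/* From the patch: only create the 1:1 (uid == 0) MR when the
	 * firmware advertises umem_uid_0; a NULL iotlb routes
	 * _mlx5_vdpa_create_mr() to create_dma_mr().
	 */
	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
		err = mlx5_vdpa_create_mr(mvdev, NULL);
		if (err)
			goto err_res;
	}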

If the set_map() callback is called with new translations provided
through the iotlb, the driver destroys the 1:1 MR and creates a regular
one.
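
The set_map() flow itself is not in this diff; the expected transition
looks roughly like the sketch below (switch_to_user_mr() is a
hypothetical name, used here only for illustration):

	/* Sketch: on new translations, drop whichever MR is active and
	 * rebuild from the iotlb; a non-NULL iotlb makes
	 * _mlx5_vdpa_create_mr() take the create_user_mr() path.
	 */
	static int switch_to_user_mr(struct mlx5_vdpa_dev *mvdev,
				     struct vhost_iotlb *iotlb)
	{
		mlx5_vdpa_destroy_mr(mvdev);
		return mlx5_vdpa_create_mr(mvdev, iotlb);
	}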
Signed-off-by: Eli Cohen <elic@nvidia.com>
Link: https://lore.kernel.org/r/20210602085854.62690-1-elic@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
parent 7d23dcdf
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -35,6 +35,7 @@ struct mlx5_vdpa_mr {
 
 	/* serialize mkey creation and destruction */
 	struct mutex mkey_mtx;
+	bool user_mr;
 };
 
 struct mlx5_vdpa_resources {
...@@ -355,7 +355,7 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 ...@@ -355,7 +355,7 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
* indirect memory key that provides access to the enitre address space given * indirect memory key that provides access to the enitre address space given
* by iotlb. * by iotlb.
*/ */
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{ {
struct mlx5_vdpa_mr *mr = &mvdev->mr; struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr; struct mlx5_vdpa_direct_mr *dmr;
...@@ -369,9 +369,6 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb ...@@ -369,9 +369,6 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
int err = 0; int err = 0;
int nnuls; int nnuls;
if (mr->initialized)
return 0;
INIT_LIST_HEAD(&mr->head); INIT_LIST_HEAD(&mr->head);
for (map = vhost_iotlb_itree_first(iotlb, start, last); map; for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) { map = vhost_iotlb_itree_next(map, start, last)) {
...@@ -409,7 +406,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb ...@@ -409,7 +406,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
if (err) if (err)
goto err_chain; goto err_chain;
mr->initialized = true; mr->user_mr = true;
return 0; return 0;
err_chain: err_chain:
...@@ -421,33 +418,94 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb ...@@ -421,33 +418,94 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
return err; return err;
} }
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
int err;
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
if (!err)
mr->user_mr = false;
kfree(in);
return err;
}
static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
}
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{ {
struct mlx5_vdpa_mr *mr = &mvdev->mr; struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err; int err;
mutex_lock(&mr->mkey_mtx); if (mr->initialized)
return 0;
if (iotlb)
err = create_user_mr(mvdev, iotlb);
else
err = create_dma_mr(mvdev, mr);
if (!err)
mr->initialized = true;
return err;
}
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
int err;
mutex_lock(&mvdev->mr.mkey_mtx);
err = _mlx5_vdpa_create_mr(mvdev, iotlb); err = _mlx5_vdpa_create_mr(mvdev, iotlb);
mutex_unlock(&mr->mkey_mtx); mutex_unlock(&mvdev->mr.mkey_mtx);
return err; return err;
} }
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{ {
struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct mlx5_vdpa_direct_mr *dmr; struct mlx5_vdpa_direct_mr *dmr;
struct mlx5_vdpa_direct_mr *n; struct mlx5_vdpa_direct_mr *n;
mutex_lock(&mr->mkey_mtx);
if (!mr->initialized)
goto out;
destroy_indirect_key(mvdev, mr); destroy_indirect_key(mvdev, mr);
list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) { list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
list_del_init(&dmr->list); list_del_init(&dmr->list);
unmap_direct_mr(mvdev, dmr); unmap_direct_mr(mvdev, dmr);
kfree(dmr); kfree(dmr);
} }
}
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
mutex_lock(&mr->mkey_mtx);
if (!mr->initialized)
goto out;
if (mr->user_mr)
destroy_user_mr(mvdev, mr);
else
destroy_dma_mr(mvdev, mr);
memset(mr, 0, sizeof(*mr)); memset(mr, 0, sizeof(*mr));
mr->initialized = false; mr->initialized = false;
out: out:
......
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1781,6 +1781,10 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 		ndev->mvdev.status = 0;
 		ndev->mvdev.mlx_features = 0;
 		++mvdev->generation;
+		if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+			if (mlx5_vdpa_create_mr(mvdev, NULL))
+				mlx5_vdpa_warn(mvdev, "create MR failed\n");
+		}
 		return;
 	}
 
@@ -1861,6 +1865,7 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
 	ndev = to_mlx5_vdpa_ndev(mvdev);
 
 	free_resources(ndev);
+	mlx5_vdpa_destroy_mr(mvdev);
 	if (!is_zero_ether_addr(ndev->config.mac)) {
 		pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
 		mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -2037,9 +2042,15 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
 	if (err)
 		goto err_mpfs;
 
+	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+		err = mlx5_vdpa_create_mr(mvdev, NULL);
+		if (err)
+			goto err_res;
+	}
+
 	err = alloc_resources(ndev);
 	if (err)
-		goto err_res;
+		goto err_mr;
 
 	mvdev->vdev.mdev = &mgtdev->mgtdev;
 	err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
@@ -2051,6 +2062,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
 
 err_reg:
 	free_resources(ndev);
+err_mr:
+	mlx5_vdpa_destroy_mr(mvdev);
 err_res:
 	mlx5_vdpa_free_resources(&ndev->mvdev);
 err_mpfs: