Commit 7be3412a authored by Parav Pandit, committed by Saeed Mahameed

net/mlx5: Use dma device access helper

Use the PCI device directly for DMA accesses, since a non-PCI device is
unlikely to support IOMMU and DMA mappings.
Introduce and use a helper routine to access the DMA device.
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 036e19b9
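
For reference, the change pivots on the small inline helper added to mlx5_core.h in a hunk below; every DMA-API caller that previously used dev->device or &dev->pdev->dev is converted to it:

    static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
    {
            return &dev->pdev->dev;
    }

    /* A representative caller conversion from the diff: */
    dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
                      buf->frags->map);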
@@ -56,8 +56,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
 					   size_t size, dma_addr_t *dma_handle,
 					   int node)
 {
+	struct device *device = mlx5_core_dma_dev(dev);
 	struct mlx5_priv *priv = &dev->priv;
-	struct device *device = dev->device;
 	int original_node;
 	void *cpu_handle;
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 {
-	dma_free_coherent(dev->device, buf->size, buf->frags->buf,
+	dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
 			  buf->frags->map);
 	kfree(buf->frags);
@@ -140,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 		if (!frag->buf)
 			goto err_free_buf;
 		if (frag->map & ((1 << buf->page_shift) - 1)) {
-			dma_free_coherent(dev->device, frag_sz,
+			dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
 					  buf->frags[i].buf, buf->frags[i].map);
 			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
 				       &frag->map, buf->page_shift);
@@ -153,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 err_free_buf:
 	while (i--)
-		dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
+		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
 				  buf->frags[i].map);
 	kfree(buf->frags);
 err_out:
@@ -169,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 	for (i = 0; i < buf->npages; i++) {
 		int frag_sz = min_t(int, size, PAGE_SIZE);
-		dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
+		dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
 				  buf->frags[i].map);
 		size -= frag_sz;
 	}
@@ -275,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 	__set_bit(db->index, db->u.pgdir->bitmap);
 	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
-		dma_free_coherent(dev->device, PAGE_SIZE,
+		dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
 				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
 		list_del(&db->u.pgdir->list);
 		bitmap_free(db->u.pgdir->bitmap);
...
@@ -1899,9 +1899,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 {
-	struct device *ddev = dev->device;
-
-	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
 						&cmd->alloc_dma, GFP_KERNEL);
 	if (!cmd->cmd_alloc_buf)
 		return -ENOMEM;
@@ -1914,9 +1912,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 		return 0;
 	}
-	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
+	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
 			  cmd->alloc_dma);
-	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
 						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
 						&cmd->alloc_dma, GFP_KERNEL);
 	if (!cmd->cmd_alloc_buf)
@@ -1930,9 +1928,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 {
-	struct device *ddev = dev->device;
-
-	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
+	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
 			  cmd->alloc_dma);
 }
@@ -1964,7 +1960,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (!cmd->stats)
 		return -ENOMEM;
-	cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
+	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
 	if (!cmd->pool) {
 		err = -ENOMEM;
 		goto dma_pool_err;
...
@@ -124,7 +124,7 @@ static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
 static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
 {
 	struct mlx5_core_dev *dev = tracer->dev;
-	struct device *ddev = &dev->pdev->dev;
+	struct device *ddev;
 	dma_addr_t dma;
 	void *buff;
 	gfp_t gfp;
@@ -142,6 +142,7 @@ static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
 	}
 	tracer->buff.log_buf = buff;

+	ddev = mlx5_core_dma_dev(dev);
 	dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
 	if (dma_mapping_error(ddev, dma)) {
 		mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
@@ -162,11 +163,12 @@ static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
 static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
 {
 	struct mlx5_core_dev *dev = tracer->dev;
-	struct device *ddev = &dev->pdev->dev;
+	struct device *ddev;
 	if (!tracer->buff.log_buf)
 		return;

+	ddev = mlx5_core_dma_dev(dev);
 	dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
 	free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
 }
...
@@ -78,7 +78,7 @@ static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump
 			       struct page *page)
 {
 	struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
-	struct device *ddev = &dev->pdev->dev;
+	struct device *ddev = mlx5_core_dma_dev(dev);
 	u32 out_seq_num;
 	u32 in_seq_num;
 	dma_addr_t dma;
...
@@ -9,7 +9,7 @@
 static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
 			      struct xsk_buff_pool *pool)
 {
-	struct device *dev = priv->mdev->device;
+	struct device *dev = mlx5_core_dma_dev(priv->mdev);
 	return xsk_pool_dma_map(pool, dev, 0);
 }
...
@@ -253,7 +253,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 		goto err_out;
 	}
-	pdev = sq->channel->priv->mdev->device;
+	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
 	buf->dma_addr = dma_map_single(pdev, &buf->progress,
 				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
@@ -390,7 +390,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 	priv_rx = buf->priv_rx;
 	resync = &priv_rx->resync;
-	dev = resync->priv->mdev->device;
+	dev = mlx5_core_dma_dev(resync->priv->mdev);
 	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
 		goto out;
...
@@ -1943,7 +1943,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->tstamp = &priv->tstamp;
 	c->ix = ix;
 	c->cpu = cpu;
-	c->pdev = priv->mdev->device;
+	c->pdev = mlx5_core_dma_dev(priv->mdev);
 	c->netdev = priv->netdev;
 	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
 	c->num_tc = params->num_tc;
@@ -2131,7 +2131,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
 	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
-	param->wq.buf_numa_node = dev_to_node(mdev->device);
+	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
 	mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
 }
@@ -2147,7 +2147,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
 		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
 	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
-	param->wq.buf_numa_node = dev_to_node(mdev->device);
+	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
 }
 void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2159,7 +2159,7 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
 	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
-	param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
+	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
 }
 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@@ -3197,8 +3197,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
 			       struct mlx5e_cq *cq,
 			       struct mlx5e_cq_param *param)
 {
-	param->wq.buf_numa_node = dev_to_node(mdev->device);
-	param->wq.db_numa_node = dev_to_node(mdev->device);
+	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+	param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
 	return mlx5e_alloc_cq_common(mdev, param, cq);
 }
...
@@ -54,7 +54,7 @@ static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
 	if (unlikely(!buf->sg[0].data))
 		goto out;
-	dma_device = &conn->fdev->mdev->pdev->dev;
+	dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
 	buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
 					     buf->sg[0].size, buf->dma_dir);
 	err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
@@ -86,7 +86,7 @@ static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
 {
 	struct device *dma_device;
-	dma_device = &conn->fdev->mdev->pdev->dev;
+	dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
 	if (buf->sg[1].data)
 		dma_unmap_single(dma_device, buf->sg[1].dma_addr,
 				 buf->sg[1].size, buf->dma_dir);
...
@@ -739,7 +739,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
 	pci_set_drvdata(dev->pdev, dev);
 	dev->bar_addr = pci_resource_start(pdev, 0);
-	priv->numa_node = dev_to_node(&dev->pdev->dev);
+	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
 	err = mlx5_pci_enable_device(dev);
 	if (err) {
...
@@ -100,6 +100,11 @@ do { \
 		 __func__, __LINE__, current->pid, \
 		 ##__VA_ARGS__)

+static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
+{
+	return &dev->pdev->dev;
+}
+
 enum {
 	MLX5_CMD_DATA, /* print command payload only */
 	MLX5_CMD_TIME, /* print command execution time */
...
@@ -238,7 +238,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
 	rb_erase(&fwp->rb_node, root);
 	if (in_free_list)
 		list_del(&fwp->list);
-	dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
 		       PAGE_SIZE, DMA_BIDIRECTIONAL);
 	__free_page(fwp->page);
 	kfree(fwp);
@@ -265,7 +265,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
 static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
 {
-	struct device *device = dev->device;
+	struct device *device = mlx5_core_dma_dev(dev);
 	int nid = dev_to_node(device);
 	struct page *page;
 	u64 zero_addr = 1;
...
@@ -831,7 +831,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
 	if (!mr)
 		return NULL;
-	dma_device = &mdev->pdev->dev;
+	dma_device = mlx5_core_dma_dev(mdev);
 	dma_addr = dma_map_single(dma_device, buf, size,
 				  DMA_BIDIRECTIONAL);
 	err = dma_mapping_error(dma_device, dma_addr);
@@ -860,7 +860,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
 static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
 {
 	mlx5_core_destroy_mkey(mdev, &mr->mkey);
-	dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
+	dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
 			 DMA_BIDIRECTIONAL);
 	kfree(mr);
 }