Commit c282da41 authored by Jason Gunthorpe, committed by Doug Ledford

RDMA/mlx4: Use rdma_user_mmap_io

Rely on the new core code helper to map BAR memory from the driver.
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5f9794dc
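
For context, a minimal sketch of the call pattern this conversion moves to, assuming the five-argument rdma_user_mmap_io() signature added by the parent commit; foo_mmap() and foo_get_bar_pfn() are hypothetical names, not part of this patch. The core helper validates the VMA, performs the io_remap_pfn_range() itself, and tracks the mapping so it can be zapped on device disassociation, which is why the driver-local vm_ops and zap_vma_ptes() bookkeeping removed below is no longer needed.

#include <linux/mm.h>		/* PAGE_SIZE, pgprot_noncached() */
#include <rdma/ib_verbs.h>	/* struct ib_ucontext, rdma_user_mmap_io() */

/* Hypothetical: where the driver's BAR page lives (e.g. a UAR PFN). */
static unsigned long foo_get_bar_pfn(struct ib_ucontext *context)
{
	return 0; /* placeholder; a real driver derives this from its HW */
}

/* Hypothetical driver mmap handler built on the core helper. */
static int foo_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	/* Hand out a single page of BAR space per mmap() call. */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	/*
	 * The helper checks the requested size, remaps the PFN and records
	 * the VMA so the core can zap it if the device is disassociated.
	 */
	return rdma_user_mmap_io(context, vma, foo_get_bar_pfn(context),
				 PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot));
}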
@@ -1138,144 +1138,50 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	return 0;
 }
 
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
-	/* vma_open is called when a new VMA is created on top of our VMA.
-	 * This is done through either mremap flow or split_vma (usually due
-	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
-	 * vma, as this VMA is strongly hardware related. Therefore we set the
-	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
-	 * calling us again and trying to do incorrect actions. We assume that
-	 * the original vma size is exactly a single page that there will be no
-	 * "splitting" operations on.
-	 */
-	area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
-	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
-	/* It's guaranteed that all VMAs opened on a FD are closed before the
-	 * file itself is closed, therefore no sync is needed with the regular
-	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
-	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
-	 * The close operation is usually called under mm->mmap_sem except when
-	 * process is exiting. The exiting case is handled explicitly as part
-	 * of mlx4_ib_disassociate_ucontext.
-	 */
-	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
-				area->vm_private_data;
-
-	/* set the vma context pointer to null in the mlx4_ib driver's private
-	 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
-	 */
-	mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
-	.open = mlx4_ib_vma_open,
-	.close = mlx4_ib_vma_close
-};
-
 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
-	int i;
-	struct vm_area_struct *vma;
-	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
-	/* need to protect from a race on closing the vma as part of
-	 * mlx4_ib_vma_close().
-	 */
-	for (i = 0; i < HW_BAR_COUNT; i++) {
-		vma = context->hw_bar_info[i].vma;
-		if (!vma)
-			continue;
-
-		zap_vma_ptes(context->hw_bar_info[i].vma,
-			     context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
-		context->hw_bar_info[i].vma->vm_flags &=
-			~(VM_SHARED | VM_MAYSHARE);
-		/* context going to be destroyed, should not access ops any more */
-		context->hw_bar_info[i].vma->vm_ops = NULL;
-	}
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
-				 struct mlx4_ib_vma_private_data *vma_private_data)
-{
-	vma_private_data->vma = vma;
-	vma->vm_private_data = vma_private_data;
-	vma->vm_ops = &mlx4_ib_vm_ops;
 }
 
 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct mlx4_ib_dev *dev = to_mdev(context->device);
-	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
 
 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 		return -EINVAL;
 
-	if (vma->vm_pgoff == 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
-			return -EINVAL;
-
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+	switch (vma->vm_pgoff) {
+	case 0:
+		return rdma_user_mmap_io(context, vma,
+					 to_mucontext(context)->uar.pfn,
+					 PAGE_SIZE,
+					 pgprot_noncached(vma->vm_page_prot));
 
-	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
-			return -EINVAL;
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn +
-				       dev->dev->caps.num_uars,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
+	case 1:
+		if (dev->dev->caps.bf_reg_size == 0)
+			return -EINVAL;
+		return rdma_user_mmap_io(
+			context, vma,
+			to_mucontext(context)->uar.pfn +
+				dev->dev->caps.num_uars,
+			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
 
-	} else if (vma->vm_pgoff == 3) {
+	case 3: {
 		struct mlx4_clock_params params;
 		int ret;
 
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
-			return -EINVAL;
-
 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
 		if (ret)
 			return ret;
 
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (pci_resource_start(dev->dev->persist->pdev,
-							   params.bar) +
-					params.offset)
-				       >> PAGE_SHIFT,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma,
-				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
-	} else {
-		return -EINVAL;
+		return rdma_user_mmap_io(
+			context, vma,
+			(pci_resource_start(dev->dev->persist->pdev,
+					    params.bar) +
+			 params.offset) >>
+				PAGE_SHIFT,
+			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
 	}
 
-	return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
...
@@ -80,16 +80,11 @@ enum hw_bar_type {
 	HW_BAR_COUNT
 };
 
-struct mlx4_ib_vma_private_data {
-	struct vm_area_struct *vma;
-};
-
 struct mlx4_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct mlx4_uar		uar;
 	struct list_head	db_page_list;
 	struct mutex		db_page_mutex;
-	struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
 	struct list_head	wqn_ranges_list;
 	struct mutex		wqn_ranges_mutex; /* protect wqn_ranges_list */
 };
...