Commit 4ed131d0 authored by Yishai Hadas, committed by Jason Gunthorpe

IB/mlx5: Expose dynamic mmap allocation

This patch exposes the option to dynamically allocate a UAR; this
functionality will be used in a downstream patch in this series as
part of QP creation.

Specifically, the user space driver asks for a UAR allocation at a given
page index; upon success, this UAR and its bfregs can be used by the
user space driver as part of QP creation.

To enable allocating more than 256 UARs, the page index is encoded in an
extra byte just after the command byte.
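
For illustration, the sketch below shows how a user space driver might
build the mmap offset for this command. The helper names are hypothetical;
the command value 6 is MLX5_IB_MMAP_ALLOC_WC from this patch, and the
8-bit command shift is assumed to match the driver's existing
get_command()/get_arg() layout.

    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical helper: pack the command and a 16-bit page index into
     * vm_pgoff. Assumed layout, mirroring the kernel decoders: bits 0-7
     * hold the index low byte, bits 8-15 the command, bits 16-23 the
     * index high byte (consumed by get_extended_index()).
     */
    static off_t alloc_wc_offset(uint32_t page_idx)
    {
            unsigned long pgoff = (page_idx & 0xff) |     /* index low byte */
                    (6UL << 8) |                          /* MLX5_IB_MMAP_ALLOC_WC */
                    (((page_idx >> 8) & 0xff) << 16);     /* index high byte */

            return (off_t)pgoff * sysconf(_SC_PAGESIZE);  /* mmap offsets are in bytes */
    }

    /* Usage: map one dynamically allocated UAR page at the given index. */
    static void *alloc_wc_uar(int cmd_fd, uint32_t page_idx)
    {
            return mmap(NULL, sysconf(_SC_PAGESIZE), PROT_WRITE, MAP_SHARED,
                        cmd_fd, alloc_wc_offset(page_idx));
    }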
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 31a78a5a
@@ -1301,6 +1301,10 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
 		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
 	}
 
+	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
+		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
+
 	return 0;
 
 error:
@@ -1318,13 +1322,17 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
 	int i;
 
 	bfregi = &context->bfregi;
-	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
-		err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
-		if (err) {
-			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
-			return err;
+	for (i = 0; i < bfregi->num_sys_pages; i++) {
+		if (i < bfregi->num_static_sys_pages ||
+		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
+			err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+			if (err) {
+				mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
+				return err;
+			}
 		}
 	}
 
 	return 0;
 }
@@ -1582,15 +1590,13 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 }
 
 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
-				 struct mlx5_bfreg_info *bfregi,
-				 int idx)
+				 int uar_idx)
 {
 	int fw_uars_per_page;
 
 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
 
-	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
-		bfregi->sys_pages[idx] / fw_uars_per_page;
+	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
 }
 
 static int get_command(unsigned long offset)
@@ -1608,6 +1614,12 @@ static int get_index(unsigned long offset)
 	return get_arg(offset);
 }
 
+/* Index resides in an extra byte to enable larger values than 255 */
+static int get_extended_index(unsigned long offset)
+{
+	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
+}
+
 static void mlx5_ib_vma_open(struct vm_area_struct *area)
 {
 	/* vma_open is called when a new VMA is created on top of our VMA. This
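
A concrete decode of the helper added above: for a hypothetical page index
of 300 (0x12c), user space places 0x2c in the classic index byte and 0x01
in the extra byte; get_extended_index() reassembles 0x2c | (0x01 << 8) = 300.
A minimal, self-contained round-trip check, assuming the driver's 8-bit
command shift:

    #include <assert.h>

    /* User-space mirrors of the kernel helpers (assumed 8-bit command shift). */
    static int get_arg(unsigned long offset)
    {
            return offset & 0xff;                      /* classic index byte */
    }

    static int get_extended_index(unsigned long offset)
    {
            return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
    }

    int main(void)
    {
            /* Encode page index 300 under command 6 (MLX5_IB_MMAP_ALLOC_WC). */
            unsigned long pgoff = (300 & 0xff) | (6UL << 8) | ((300UL >> 8) << 16);

            assert(get_extended_index(pgoff) == 300);  /* 0x2c | (0x01 << 8) */
            return 0;
    }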
@@ -1758,21 +1770,29 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 	unsigned long idx;
 	phys_addr_t pfn, pa;
 	pgprot_t prot;
-	int uars_per_page;
+	u32 bfreg_dyn_idx = 0;
+	u32 uar_index;
+	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
+	int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
+				bfregi->num_static_sys_pages;
 
 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 		return -EINVAL;
 
-	uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
-	idx = get_index(vma->vm_pgoff);
-	if (idx % uars_per_page ||
-	    idx * uars_per_page >= bfregi->num_sys_pages) {
-		mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+	if (dyn_uar)
+		idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
+	else
+		idx = get_index(vma->vm_pgoff);
+
+	if (idx >= max_valid_idx) {
+		mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
+			     idx, max_valid_idx);
 		return -EINVAL;
 	}
 
 	switch (cmd) {
 	case MLX5_IB_MMAP_WC_PAGE:
+	case MLX5_IB_MMAP_ALLOC_WC:
 /* Some architectures don't support WC memory */
 #if defined(CONFIG_X86)
 		if (!pat_enabled())
@@ -1792,7 +1812,40 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 		return -EINVAL;
 	}
 
-	pfn = uar_index2pfn(dev, bfregi, idx);
+	if (dyn_uar) {
+		int uars_per_page;
+
+		uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+		bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
+		if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
+			mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
+				     bfreg_dyn_idx, bfregi->total_num_bfregs);
+			return -EINVAL;
+		}
+
+		mutex_lock(&bfregi->lock);
+		/* Fail if uar already allocated, first bfreg index of each
+		 * page holds its count.
+		 */
+		if (bfregi->count[bfreg_dyn_idx]) {
+			mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
+			mutex_unlock(&bfregi->lock);
+			return -EINVAL;
+		}
+
+		bfregi->count[bfreg_dyn_idx]++;
+		mutex_unlock(&bfregi->lock);
+
+		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+		if (err) {
+			mlx5_ib_warn(dev, "UAR alloc failed\n");
+			goto free_bfreg;
+		}
+	} else {
+		uar_index = bfregi->sys_pages[idx];
+	}
+
+	pfn = uar_index2pfn(dev, uar_index);
 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
 
 	vma->vm_page_prot = prot;
@@ -1801,14 +1854,32 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
 	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
 				 PAGE_SIZE, vma->vm_page_prot);
 	if (err) {
 		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
 			    err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
-		return -EAGAIN;
+		err = -EAGAIN;
+		goto err;
 	}
 
 	pa = pfn << PAGE_SHIFT;
 	mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
 		    vma->vm_start, &pa);
 
-	return mlx5_ib_set_vma_data(vma, context);
+	err = mlx5_ib_set_vma_data(vma, context);
+	if (err)
+		goto err;
+
+	if (dyn_uar)
+		bfregi->sys_pages[idx] = uar_index;
+	return 0;
+
+err:
+	if (!dyn_uar)
+		return err;
+
+	mlx5_cmd_free_uar(dev->mdev, idx);
+
+free_bfreg:
+	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
+
+	return err;
 }
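
A note on the bookkeeping above: the first non-fast-path bfreg of each
dynamically allocated page doubles as the page's busy count, which is why
uar_mmap() computes bfreg_dyn_idx and rejects a second mmap at the same
index. A hedged worked example of that index math, assuming
get_uars_per_sys_page() returns 1 and MLX5_NON_FP_BFREGS_PER_UAR is 2
(values suggested by the driver's constants, not guaranteed for every
configuration):

    #include <stdio.h>

    #define UARS_PER_PAGE         1 /* assumed get_uars_per_sys_page() result */
    #define NON_FP_BFREGS_PER_UAR 2 /* assumed MLX5_NON_FP_BFREGS_PER_UAR */

    int main(void)
    {
            unsigned long num_static_sys_pages = 2; /* illustrative only */

            for (unsigned long ext = 0; ext < 3; ext++) {
                    /* idx as uar_mmap() computes it for MLX5_IB_MMAP_ALLOC_WC */
                    unsigned long idx = ext + num_static_sys_pages;
                    /* first bfreg of the page; its count[] slot marks it busy */
                    unsigned long bfreg_dyn_idx =
                            idx * (UARS_PER_PAGE * NON_FP_BFREGS_PER_UAR);

                    printf("extended idx %lu -> sys page %lu -> bfreg %lu\n",
                           ext, idx, bfreg_dyn_idx);
            }
            return 0;
    }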
@@ -1823,6 +1894,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 	case MLX5_IB_MMAP_WC_PAGE:
 	case MLX5_IB_MMAP_NC_PAGE:
 	case MLX5_IB_MMAP_REGULAR_PAGE:
+	case MLX5_IB_MMAP_ALLOC_WC:
 		return uar_mmap(dev, command, vma, context);
 
 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
@@ -77,6 +77,7 @@ enum mlx5_ib_mmap_cmd {
 	MLX5_IB_MMAP_NC_PAGE			= 3,
 	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
 	MLX5_IB_MMAP_CORE_CLOCK			= 5,
+	MLX5_IB_MMAP_ALLOC_WC			= 6,
 };
 
 enum {
@@ -112,6 +113,10 @@ enum {
 	MLX5_TM_MAX_SGE			= 1,
 };
 
+enum {
+	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
+};
+
 struct mlx5_ib_vma_private_data {
 	struct list_head list;
 	struct vm_area_struct *vma;
@@ -1021,6 +1026,9 @@ void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
 
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
+			int bfregn);
+
 static inline void init_query_mad(struct ib_smp *mad)
 {
 	mad->base_version = 1;
@@ -581,7 +581,7 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
 	return bfregn;
 }
 
-static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
 {
 	mutex_lock(&bfregi->lock);
 	bfregi->count[bfregn]--;
@@ -874,7 +874,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	ib_umem_release(ubuffer->umem);
 
 err_bfreg:
-	free_bfreg(dev, &context->bfregi, bfregn);
+	mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
 	return err;
 }
 
@@ -887,7 +887,7 @@ static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	mlx5_ib_db_unmap_user(context, &qp->db);
 	if (base->ubuffer.umem)
 		ib_umem_release(base->ubuffer.umem);
-	free_bfreg(dev, &context->bfregi, qp->bfregn);
+	mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
 }