Commit bda65b42 authored by David S. Miller

Merge tag 'mlx5-4kuar-for-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
mlx5 4K UAR

The following series of patches optimizes the usage of the UAR area contained
within BAR 0-1. Previous versions of the firmware and the driver assumed that
each system page contains a single UAR. This patch set queries the firmware
for a new capability that, if published, means the firmware supports UARs of
a fixed 4KB size regardless of the system page size. On powerpc, where the
page size is 64KB, this means we can fit 16 UARs per system page. Since
user-space processes consume eight UARs per context by default, a process now
needs only a single system page to fulfill that requirement, and it can in
fact make use of more UARs, which improves performance.
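
To make the numbers above concrete, here is a small standalone sketch (plain
user-space C, not part of the series; the helper name and constants are
illustrative) of the UARs-per-system-page calculation the driver performs in
get_uars_per_sys_page():

/*
 * Standalone illustration: how many fixed-4K UARs fit in one system page
 * when both firmware and library support the uar_4k scheme.  On powerpc,
 * PAGE_SIZE is 64KB, so one system page carries 64K / 4K = 16 UARs, which
 * already covers the eight UARs a user context consumes by default.
 */
#include <stdio.h>

#define ADAPTER_PAGE_SIZE	4096	/* fixed UAR size when uar_4k is used */

static int uars_per_sys_page(long sys_page_size, int fw_4k, int lib_4k)
{
	/* mirrors the logic of the driver's get_uars_per_sys_page() */
	if (fw_4k && lib_4k)
		return sys_page_size / ADAPTER_PAGE_SIZE;
	return 1;
}

int main(void)
{
	printf("x86 (4K pages):      %d UARs per system page\n",
	       uars_per_sys_page(4096, 1, 1));
	printf("powerpc (64K pages): %d UARs per system page\n",
	       uars_per_sys_page(65536, 1, 1));
	return 0;
}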

In addition to optimizing user-space processes, we introduce an allocator
that kernel consumers can use to allocate blue flame registers (areas within
a UAR that are used to write doorbells). This further optimizes use of the
UAR area: the Ethernet driver used to consume a single blue flame register
per system page, and it now uses two blue flame registers per 4KB UAR.
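
As a usage sketch of the new kernel API (inferred from the calls this series
adds in mlx5_ib_add() and mlx5e_create_sq(); the meaning of the two boolean
parameters, write-combining mapping and fast-path class, is an assumption
drawn from those call sites, not a definitive API description):

/* Hedged sketch: a kernel consumer grabs a blue flame register, programs
 * bfreg->index into its SQ/QP context, writes doorbells through bfreg->map,
 * and releases the bfreg on teardown.
 */
static int example_setup_doorbell(struct mlx5_core_dev *mdev,
				  struct mlx5_sq_bfreg *bfreg)
{
	int err;

	/* regular (non fast path) bfreg, write-combining when BlueFlame is on */
	err = mlx5_alloc_bfreg(mdev, bfreg, MLX5_CAP_GEN(mdev, bf), false);
	if (err)
		return err;

	/* bfreg->index goes into the uar_page field of the SQ/QP context;
	 * bfreg->map is the mapping doorbells are written through (the IB
	 * driver keeps its own alternating offset in struct mlx5_bf).
	 */
	return 0;
}

static void example_teardown_doorbell(struct mlx5_core_dev *mdev,
				      struct mlx5_sq_bfreg *bfreg)
{
	mlx5_free_bfreg(mdev, bfreg);
}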

The series also updates naming conventions so that the terms used in the
driver code match the terms used in the PRM (programmer's reference manual).
Thus, what used to be called a UUAR (micro UAR) is now called a BFREG (blue
flame register).

In order to support compatibility between different versions of the library,
driver, and firmware, the library now has a means to notify the kernel driver
that it supports the new scheme, and the kernel can notify the library whether
it supports this extension. Mixed versions of libraries can therefore run
concurrently without any issues.
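
The kernel side of that handshake can be seen in the mlx5_ib_alloc_ucontext()
hunk below; the relevant lines from the patch, gathered here for clarity:

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;

	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;

A library that does not set MLX5_LIB_CAP_4K_UAR keeps getting one UAR per
system page, which is how mixed library versions coexist.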
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b369e7fd f502d834
...@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) ...@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{ {
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
struct mlx5_ib_cq *cq = to_mcq(ibcq); struct mlx5_ib_cq *cq = to_mcq(ibcq);
void __iomem *uar_page = mdev->priv.uuari.uars[0].map; void __iomem *uar_page = mdev->priv.uar->map;
unsigned long irq_flags; unsigned long irq_flags;
int ret = 0; int ret = 0;
...@@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) ...@@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
mlx5_cq_arm(&cq->mcq, mlx5_cq_arm(&cq->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
uar_page, uar_page, to_mcq(ibcq)->mcq.cons_index);
MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
to_mcq(ibcq)->mcq.cons_index);
return ret; return ret;
} }
...@@ -790,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, ...@@ -790,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
MLX5_SET(cqc, cqc, log_page_size, MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT); page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = to_mucontext(context)->uuari.uars[0].index; *index = to_mucontext(context)->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) { if (ucmd.cqe_comp_en == 1) {
if (unlikely((*cqe_size != 64) || if (unlikely((*cqe_size != 64) ||
...@@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, ...@@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
MLX5_SET(cqc, cqc, log_page_size, MLX5_SET(cqc, cqc, log_page_size,
cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uuari.uars[0].index; *index = dev->mdev->priv.uar->index;
return 0; return 0;
......
...@@ -992,6 +992,86 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, ...@@ -992,6 +992,86 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
return err; return err;
} }
static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
struct mlx5_ib_alloc_ucontext_req_v2 *req,
u32 *num_sys_pages)
{
int uars_per_sys_page;
int bfregs_per_sys_page;
int ref_bfregs = req->total_num_bfregs;
if (req->total_num_bfregs == 0)
return -EINVAL;
BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
if (req->total_num_bfregs > MLX5_MAX_BFREGS)
return -ENOMEM;
uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
*num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
return -EINVAL;
mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
lib_uar_4k ? "yes" : "no", ref_bfregs,
req->total_num_bfregs, *num_sys_pages);
return 0;
}
static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
struct mlx5_bfreg_info *bfregi;
int err;
int i;
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_sys_pages; i++) {
err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
if (err)
goto error;
mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
}
return 0;
error:
for (--i; i >= 0; i--)
if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
mlx5_ib_warn(dev, "failed to free uar %d\n", i);
return err;
}
static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
struct mlx5_bfreg_info *bfregi;
int err;
int i;
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_sys_pages; i++) {
err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
if (err) {
mlx5_ib_warn(dev, "failed to free uar %d\n", i);
return err;
}
}
return 0;
}
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata) struct ib_udata *udata)
{ {
...@@ -999,17 +1079,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -999,17 +1079,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_alloc_ucontext_req_v2 req = {}; struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {}; struct mlx5_ib_alloc_ucontext_resp resp = {};
struct mlx5_ib_ucontext *context; struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari; struct mlx5_bfreg_info *bfregi;
struct mlx5_uar *uars;
int gross_uuars;
int num_uars;
int ver; int ver;
int uuarn;
int err; int err;
int i;
size_t reqlen; size_t reqlen;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
max_cqe_version); max_cqe_version);
bool lib_uar_4k;
if (!dev->ib_active) if (!dev->ib_active)
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
...@@ -1032,27 +1108,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -1032,27 +1108,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (req.flags) if (req.flags)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (req.total_num_uuars > MLX5_MAX_UUARS)
return ERR_PTR(-ENOMEM);
if (req.total_num_uuars == 0)
return ERR_PTR(-EINVAL);
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
if (reqlen > sizeof(req) && req.total_num_bfregs = ALIGN(req.total_num_bfregs,
!ib_is_udata_cleared(udata, sizeof(req), MLX5_NON_FP_BFREGS_PER_UAR);
reqlen - sizeof(req))) if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
return ERR_PTR(-EOPNOTSUPP);
req.total_num_uuars = ALIGN(req.total_num_uuars,
MLX5_NON_FP_BF_REGS_PER_PAGE);
if (req.num_low_latency_uuars > req.total_num_uuars - 1)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
...@@ -1065,6 +1128,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -1065,6 +1128,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.cqe_version = min_t(__u8, resp.cqe_version = min_t(__u8,
(__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
req.max_cqe_version); req.max_cqe_version);
resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
resp.response_length = min(offsetof(typeof(resp), response_length) + resp.response_length = min(offsetof(typeof(resp), response_length) +
sizeof(resp.response_length), udata->outlen); sizeof(resp.response_length), udata->outlen);
...@@ -1072,41 +1139,34 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -1072,41 +1139,34 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (!context) if (!context)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
uuari = &context->uuari; lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
mutex_init(&uuari->lock); bfregi = &context->bfregi;
uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
if (!uars) { /* updates req->total_num_bfregs */
err = -ENOMEM; err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
if (err)
goto out_ctx; goto out_ctx;
}
uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars), mutex_init(&bfregi->lock);
sizeof(*uuari->bitmap), bfregi->lib_uar_4k = lib_uar_4k;
bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL); GFP_KERNEL);
if (!uuari->bitmap) { if (!bfregi->count) {
err = -ENOMEM; err = -ENOMEM;
goto out_uar_ctx; goto out_ctx;
}
/*
* clear all fast path uuars
*/
for (i = 0; i < gross_uuars; i++) {
uuarn = i & 3;
if (uuarn == 2 || uuarn == 3)
set_bit(i, uuari->bitmap);
} }
uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL); bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
if (!uuari->count) { sizeof(*bfregi->sys_pages),
GFP_KERNEL);
if (!bfregi->sys_pages) {
err = -ENOMEM; err = -ENOMEM;
goto out_bitmap; goto out_count;
} }
for (i = 0; i < num_uars; i++) { err = allocate_uars(dev, context);
err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index); if (err)
if (err) goto out_sys_pages;
goto out_count;
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
...@@ -1130,7 +1190,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -1130,7 +1190,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list); INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex); mutex_init(&context->db_page_mutex);
resp.tot_uuars = req.total_num_uuars; resp.tot_bfregs = req.total_num_bfregs;
resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
if (field_avail(typeof(resp), cqe_version, udata->outlen)) if (field_avail(typeof(resp), cqe_version, udata->outlen))
...@@ -1148,26 +1208,32 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -1148,26 +1208,32 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
* pretend we don't support reading the HCA's core clock. This is also * pretend we don't support reading the HCA's core clock. This is also
* forced by mmap function. * forced by mmap function.
*/ */
if (PAGE_SIZE <= 4096 && if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { if (PAGE_SIZE <= 4096) {
resp.comp_mask |= resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset = resp.hca_core_clock_offset =
offsetof(struct mlx5_init_seg, internal_timer_h) % offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
PAGE_SIZE; }
resp.response_length += sizeof(resp.hca_core_clock_offset) + resp.response_length += sizeof(resp.hca_core_clock_offset) +
sizeof(resp.reserved2); sizeof(resp.reserved2);
} }
if (field_avail(typeof(resp), log_uar_size, udata->outlen))
resp.response_length += sizeof(resp.log_uar_size);
if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
resp.response_length += sizeof(resp.num_uars_per_page);
err = ib_copy_to_udata(udata, &resp, resp.response_length); err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err) if (err)
goto out_td; goto out_td;
uuari->ver = ver; bfregi->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars; bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
uuari->uars = uars;
uuari->num_uars = num_uars;
context->cqe_version = resp.cqe_version; context->cqe_version = resp.cqe_version;
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
return &context->ibucontext; return &context->ibucontext;
...@@ -1179,19 +1245,17 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -1179,19 +1245,17 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
free_page(context->upd_xlt_page); free_page(context->upd_xlt_page);
out_uars: out_uars:
for (i--; i >= 0; i--) deallocate_uars(dev, context);
mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
kfree(uuari->count);
out_bitmap: out_sys_pages:
kfree(uuari->bitmap); kfree(bfregi->sys_pages);
out_uar_ctx: out_count:
kfree(uars); kfree(bfregi->count);
out_ctx: out_ctx:
kfree(context); kfree(context);
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -1199,30 +1263,31 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) ...@@ -1199,30 +1263,31 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{ {
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_uuar_info *uuari = &context->uuari; struct mlx5_bfreg_info *bfregi;
int i;
bfregi = &context->bfregi;
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn); mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
free_page(context->upd_xlt_page); free_page(context->upd_xlt_page);
deallocate_uars(dev, context);
for (i = 0; i < uuari->num_uars; i++) { kfree(bfregi->sys_pages);
if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) kfree(bfregi->count);
mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
}
kfree(uuari->count);
kfree(uuari->bitmap);
kfree(uuari->uars);
kfree(context); kfree(context);
return 0; return 0;
} }
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi,
int idx)
{ {
return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index; int fw_uars_per_page;
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
bfregi->sys_pages[idx] / fw_uars_per_page;
} }
static int get_command(unsigned long offset) static int get_command(unsigned long offset)
...@@ -1377,11 +1442,23 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, ...@@ -1377,11 +1442,23 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma, struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context) struct mlx5_ib_ucontext *context)
{ {
struct mlx5_uuar_info *uuari = &context->uuari; struct mlx5_bfreg_info *bfregi = &context->bfregi;
int err; int err;
unsigned long idx; unsigned long idx;
phys_addr_t pfn, pa; phys_addr_t pfn, pa;
pgprot_t prot; pgprot_t prot;
int uars_per_page;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
idx = get_index(vma->vm_pgoff);
if (idx % uars_per_page ||
idx * uars_per_page >= bfregi->num_sys_pages) {
mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
return -EINVAL;
}
switch (cmd) { switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE: case MLX5_IB_MMAP_WC_PAGE:
...@@ -1404,14 +1481,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, ...@@ -1404,14 +1481,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
return -EINVAL; return -EINVAL;
} }
if (vma->vm_end - vma->vm_start != PAGE_SIZE) pfn = uar_index2pfn(dev, bfregi, idx);
return -EINVAL;
idx = get_index(vma->vm_pgoff);
if (idx >= uuari->num_uars)
return -EINVAL;
pfn = uar_index2pfn(dev, uuari->uars[idx].index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot; vma->vm_page_prot = prot;
...@@ -3072,8 +3142,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) ...@@ -3072,8 +3142,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (mlx5_use_mad_ifc(dev)) if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev); get_ext_port_caps(dev);
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
if (!mlx5_lag_is_active(mdev)) if (!mlx5_lag_is_active(mdev))
name = "mlx5_%d"; name = "mlx5_%d";
else else
...@@ -3249,9 +3317,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) ...@@ -3249,9 +3317,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err) if (err)
goto err_odp; goto err_odp;
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
if (!dev->mdev->priv.uar)
goto err_q_cnt;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
goto err_uar_page;
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
goto err_bfreg;
err = ib_register_device(&dev->ib_dev, NULL); err = ib_register_device(&dev->ib_dev, NULL);
if (err) if (err)
goto err_q_cnt; goto err_fp_bfreg;
err = create_umr_res(dev); err = create_umr_res(dev);
if (err) if (err)
...@@ -3274,6 +3354,15 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) ...@@ -3274,6 +3354,15 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
err_dev: err_dev:
ib_unregister_device(&dev->ib_dev); ib_unregister_device(&dev->ib_dev);
err_fp_bfreg:
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
err_bfreg:
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
err_uar_page:
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
err_q_cnt: err_q_cnt:
mlx5_ib_dealloc_q_counters(dev); mlx5_ib_dealloc_q_counters(dev);
...@@ -3305,6 +3394,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) ...@@ -3305,6 +3394,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
mlx5_remove_netdev_notifier(dev); mlx5_remove_netdev_notifier(dev);
ib_unregister_device(&dev->ib_dev); ib_unregister_device(&dev->ib_dev);
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
mlx5_ib_dealloc_q_counters(dev); mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev); destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev); mlx5_ib_odp_remove_one(dev);
......
...@@ -90,7 +90,6 @@ enum mlx5_ib_latency_class { ...@@ -90,7 +90,6 @@ enum mlx5_ib_latency_class {
MLX5_IB_LATENCY_CLASS_LOW, MLX5_IB_LATENCY_CLASS_LOW,
MLX5_IB_LATENCY_CLASS_MEDIUM, MLX5_IB_LATENCY_CLASS_MEDIUM,
MLX5_IB_LATENCY_CLASS_HIGH, MLX5_IB_LATENCY_CLASS_HIGH,
MLX5_IB_LATENCY_CLASS_FAST_PATH
}; };
enum mlx5_ib_mad_ifc_flags { enum mlx5_ib_mad_ifc_flags {
...@@ -100,7 +99,7 @@ enum mlx5_ib_mad_ifc_flags { ...@@ -100,7 +99,7 @@ enum mlx5_ib_mad_ifc_flags {
}; };
enum { enum {
MLX5_CROSS_CHANNEL_UUAR = 0, MLX5_CROSS_CHANNEL_BFREG = 0,
}; };
enum { enum {
...@@ -120,7 +119,7 @@ struct mlx5_ib_ucontext { ...@@ -120,7 +119,7 @@ struct mlx5_ib_ucontext {
/* protect doorbell record alloc/free /* protect doorbell record alloc/free
*/ */
struct mutex db_page_mutex; struct mutex db_page_mutex;
struct mlx5_uuar_info uuari; struct mlx5_bfreg_info bfregi;
u8 cqe_version; u8 cqe_version;
/* Transport Domain number */ /* Transport Domain number */
u32 tdn; u32 tdn;
...@@ -129,6 +128,7 @@ struct mlx5_ib_ucontext { ...@@ -129,6 +128,7 @@ struct mlx5_ib_ucontext {
unsigned long upd_xlt_page; unsigned long upd_xlt_page;
/* protect ODP/KSM */ /* protect ODP/KSM */
struct mutex upd_xlt_page_mutex; struct mutex upd_xlt_page_mutex;
u64 lib_caps;
}; };
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
...@@ -324,6 +324,12 @@ struct mlx5_ib_raw_packet_qp { ...@@ -324,6 +324,12 @@ struct mlx5_ib_raw_packet_qp {
struct mlx5_ib_rq rq; struct mlx5_ib_rq rq;
}; };
struct mlx5_bf {
int buf_size;
unsigned long offset;
struct mlx5_sq_bfreg *bfreg;
};
struct mlx5_ib_qp { struct mlx5_ib_qp {
struct ib_qp ibqp; struct ib_qp ibqp;
union { union {
...@@ -349,13 +355,13 @@ struct mlx5_ib_qp { ...@@ -349,13 +355,13 @@ struct mlx5_ib_qp {
int wq_sig; int wq_sig;
int scat_cqe; int scat_cqe;
int max_inline_data; int max_inline_data;
struct mlx5_bf *bf; struct mlx5_bf bf;
int has_rq; int has_rq;
/* only for user space QPs. For kernel /* only for user space QPs. For kernel
* we have it from the bf object * we have it from the bf object
*/ */
int uuarn; int bfregn;
int create_type; int create_type;
...@@ -591,7 +597,6 @@ struct mlx5_ib_dev { ...@@ -591,7 +597,6 @@ struct mlx5_ib_dev {
struct ib_device ib_dev; struct ib_device ib_dev;
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
struct mlx5_roce roce; struct mlx5_roce roce;
MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
int num_ports; int num_ports;
/* serialize update of capability mask /* serialize update of capability mask
*/ */
...@@ -621,6 +626,8 @@ struct mlx5_ib_dev { ...@@ -621,6 +626,8 @@ struct mlx5_ib_dev {
struct list_head qp_list; struct list_head qp_list;
/* Array with num_ports elements */ /* Array with num_ports elements */
struct mlx5_ib_port *port; struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
}; };
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
...@@ -968,4 +975,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext, ...@@ -968,4 +975,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
return verify_assign_uidx(cqe_version, ucmd->uidx, user_index); return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
} }
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
MLX5_UARS_IN_PAGE : 1;
}
static inline int get_num_uars(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
}
#endif /* MLX5_IB_H */ #endif /* MLX5_IB_H */
...@@ -475,60 +475,53 @@ static int qp_has_rq(struct ib_qp_init_attr *attr) ...@@ -475,60 +475,53 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1; return 1;
} }
static int first_med_uuar(void) static int first_med_bfreg(void)
{ {
return 1; return 1;
} }
static int next_uuar(int n) enum {
{ /* this is the first blue flame register in the array of bfregs assigned
n++; * to a process. Since we do not use it for blue flame but rather
* regular 64 bit doorbells, we do not need a lock for maintaining
while (((n % 4) & 2)) * "odd/even" order
n++; */
NUM_NON_BLUE_FLAME_BFREGS = 1,
};
return n; static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
} }
static int num_med_uuar(struct mlx5_uuar_info *uuari) static int num_med_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{ {
int n; int n;
n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE - n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
uuari->num_low_latency_uuars - 1; NUM_NON_BLUE_FLAME_BFREGS;
return n >= 0 ? n : 0; return n >= 0 ? n : 0;
} }
static int max_uuari(struct mlx5_uuar_info *uuari) static int first_hi_bfreg(struct mlx5_ib_dev *dev,
{ struct mlx5_bfreg_info *bfregi)
return uuari->num_uars * 4;
}
static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{ {
int med; int med;
int i;
int t;
med = num_med_uuar(uuari);
for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
t++;
if (t == med)
return next_uuar(i);
}
return 0; med = num_med_bfreg(dev, bfregi);
return ++med;
} }
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{ {
int i; int i;
for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) { for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
if (!test_bit(i, uuari->bitmap)) { if (!bfregi->count[i]) {
set_bit(i, uuari->bitmap); bfregi->count[i]++;
uuari->count[i]++;
return i; return i;
} }
} }
...@@ -536,87 +529,61 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) ...@@ -536,87 +529,61 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
return -ENOMEM; return -ENOMEM;
} }
static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{ {
int minidx = first_med_uuar(); int minidx = first_med_bfreg();
int i; int i;
for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) { for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
if (uuari->count[i] < uuari->count[minidx]) if (bfregi->count[i] < bfregi->count[minidx])
minidx = i; minidx = i;
if (!bfregi->count[minidx])
break;
} }
uuari->count[minidx]++; bfregi->count[minidx]++;
return minidx; return minidx;
} }
static int alloc_uuar(struct mlx5_uuar_info *uuari, static int alloc_bfreg(struct mlx5_ib_dev *dev,
enum mlx5_ib_latency_class lat) struct mlx5_bfreg_info *bfregi,
enum mlx5_ib_latency_class lat)
{ {
int uuarn = -EINVAL; int bfregn = -EINVAL;
mutex_lock(&uuari->lock); mutex_lock(&bfregi->lock);
switch (lat) { switch (lat) {
case MLX5_IB_LATENCY_CLASS_LOW: case MLX5_IB_LATENCY_CLASS_LOW:
uuarn = 0; BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
uuari->count[uuarn]++; bfregn = 0;
bfregi->count[bfregn]++;
break; break;
case MLX5_IB_LATENCY_CLASS_MEDIUM: case MLX5_IB_LATENCY_CLASS_MEDIUM:
if (uuari->ver < 2) if (bfregi->ver < 2)
uuarn = -ENOMEM; bfregn = -ENOMEM;
else else
uuarn = alloc_med_class_uuar(uuari); bfregn = alloc_med_class_bfreg(dev, bfregi);
break; break;
case MLX5_IB_LATENCY_CLASS_HIGH: case MLX5_IB_LATENCY_CLASS_HIGH:
if (uuari->ver < 2) if (bfregi->ver < 2)
uuarn = -ENOMEM; bfregn = -ENOMEM;
else else
uuarn = alloc_high_class_uuar(uuari); bfregn = alloc_high_class_bfreg(dev, bfregi);
break;
case MLX5_IB_LATENCY_CLASS_FAST_PATH:
uuarn = 2;
break; break;
} }
mutex_unlock(&uuari->lock); mutex_unlock(&bfregi->lock);
return uuarn;
}
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) return bfregn;
{
clear_bit(uuarn, uuari->bitmap);
--uuari->count[uuarn];
} }
static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{ {
clear_bit(uuarn, uuari->bitmap); mutex_lock(&bfregi->lock);
--uuari->count[uuarn]; bfregi->count[bfregn]--;
} mutex_unlock(&bfregi->lock);
static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
int high_uuar = nuuars - uuari->num_low_latency_uuars;
mutex_lock(&uuari->lock);
if (uuarn == 0) {
--uuari->count[uuarn];
goto out;
}
if (uuarn < high_uuar) {
free_med_class_uuar(uuari, uuarn);
goto out;
}
free_high_class_uuar(uuari, uuarn);
out:
mutex_unlock(&uuari->lock);
} }
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
...@@ -657,9 +624,20 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, ...@@ -657,9 +624,20 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq); struct mlx5_ib_cq *recv_cq);
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn) static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, int bfregn)
{ {
return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; int bfregs_per_sys_page;
int index_of_sys_page;
int offset;
bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
return bfregi->sys_pages[index_of_sys_page] + offset;
} }
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
...@@ -762,6 +740,13 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -762,6 +740,13 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err; return err;
} }
static int adjust_bfregn(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, int bfregn)
{
return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata, struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr, struct ib_qp_init_attr *attr,
...@@ -776,7 +761,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -776,7 +761,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
int uar_index; int uar_index;
int npages; int npages;
u32 offset = 0; u32 offset = 0;
int uuarn; int bfregn;
int ncont = 0; int ncont = 0;
__be64 *pas; __be64 *pas;
void *qpc; void *qpc;
...@@ -794,27 +779,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -794,27 +779,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
*/ */
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
/* In CROSS_CHANNEL CQ and QP must use the same UAR */ /* In CROSS_CHANNEL CQ and QP must use the same UAR */
uuarn = MLX5_CROSS_CHANNEL_UUAR; bfregn = MLX5_CROSS_CHANNEL_BFREG;
else { else {
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
if (uuarn < 0) { if (bfregn < 0) {
mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to medium latency\n"); mlx5_ib_dbg(dev, "reverting to medium latency\n");
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM); bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
if (uuarn < 0) { if (bfregn < 0) {
mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n"); mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to high latency\n"); mlx5_ib_dbg(dev, "reverting to high latency\n");
uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
if (uuarn < 0) { if (bfregn < 0) {
mlx5_ib_warn(dev, "uuar allocation failed\n"); mlx5_ib_warn(dev, "bfreg allocation failed\n");
return uuarn; return bfregn;
} }
} }
} }
} }
uar_index = uuarn_to_uar_index(&context->uuari, uuarn); uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
qp->rq.offset = 0; qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
...@@ -822,7 +807,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -822,7 +807,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = set_user_buf_size(dev, qp, &ucmd, base, attr); err = set_user_buf_size(dev, qp, &ucmd, base, attr);
if (err) if (err)
goto err_uuar; goto err_bfreg;
if (ucmd.buf_addr && ubuffer->buf_size) { if (ucmd.buf_addr && ubuffer->buf_size) {
ubuffer->buf_addr = ucmd.buf_addr; ubuffer->buf_addr = ucmd.buf_addr;
...@@ -831,7 +816,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -831,7 +816,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
&ubuffer->umem, &npages, &page_shift, &ubuffer->umem, &npages, &page_shift,
&ncont, &offset); &ncont, &offset);
if (err) if (err)
goto err_uuar; goto err_bfreg;
} else { } else {
ubuffer->umem = NULL; ubuffer->umem = NULL;
} }
...@@ -854,8 +839,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -854,8 +839,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, page_offset, offset); MLX5_SET(qpc, qpc, page_offset, offset);
MLX5_SET(qpc, qpc, uar_page, uar_index); MLX5_SET(qpc, qpc, uar_page, uar_index);
resp->uuar_index = uuarn; resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
qp->uuarn = uuarn; qp->bfregn = bfregn;
err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
if (err) { if (err) {
...@@ -882,13 +867,13 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -882,13 +867,13 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (ubuffer->umem) if (ubuffer->umem)
ib_umem_release(ubuffer->umem); ib_umem_release(ubuffer->umem);
err_uuar: err_bfreg:
free_uuar(&context->uuari, uuarn); free_bfreg(dev, &context->bfregi, bfregn);
return err; return err;
} }
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp_base *base) struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
{ {
struct mlx5_ib_ucontext *context; struct mlx5_ib_ucontext *context;
...@@ -896,7 +881,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp, ...@@ -896,7 +881,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
mlx5_ib_db_unmap_user(context, &qp->db); mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem) if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem); ib_umem_release(base->ubuffer.umem);
free_uuar(&context->uuari, qp->uuarn); free_bfreg(dev, &context->bfregi, qp->bfregn);
} }
static int create_kernel_qp(struct mlx5_ib_dev *dev, static int create_kernel_qp(struct mlx5_ib_dev *dev,
...@@ -905,14 +890,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, ...@@ -905,14 +890,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
u32 **in, int *inlen, u32 **in, int *inlen,
struct mlx5_ib_qp_base *base) struct mlx5_ib_qp_base *base)
{ {
enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
struct mlx5_uuar_info *uuari;
int uar_index; int uar_index;
void *qpc; void *qpc;
int uuarn;
int err; int err;
uuari = &dev->mdev->priv.uuari;
if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO | IB_QP_CREATE_IPOIB_UD_LSO |
...@@ -920,21 +901,17 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, ...@@ -920,21 +901,17 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return -EINVAL; return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; qp->bf.bfreg = &dev->fp_bfreg;
else
uuarn = alloc_uuar(uuari, lc); qp->bf.bfreg = &dev->bfreg;
if (uuarn < 0) {
mlx5_ib_dbg(dev, "\n");
return -ENOMEM;
}
qp->bf = &uuari->bfs[uuarn]; qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
uar_index = qp->bf->uar->index; uar_index = qp->bf.bfreg->index;
err = calc_sq_size(dev, init_attr, qp); err = calc_sq_size(dev, init_attr, qp);
if (err < 0) { if (err < 0) {
mlx5_ib_dbg(dev, "err %d\n", err); mlx5_ib_dbg(dev, "err %d\n", err);
goto err_uuar; return err;
} }
qp->rq.offset = 0; qp->rq.offset = 0;
...@@ -944,7 +921,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, ...@@ -944,7 +921,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf); err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
if (err) { if (err) {
mlx5_ib_dbg(dev, "err %d\n", err); mlx5_ib_dbg(dev, "err %d\n", err);
goto err_uuar; return err;
} }
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
...@@ -994,34 +971,30 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, ...@@ -994,34 +971,30 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return 0; return 0;
err_wrid: err_wrid:
mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head); kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list); kfree(qp->sq.w_list);
kfree(qp->sq.wrid); kfree(qp->sq.wrid);
kfree(qp->sq.wr_data); kfree(qp->sq.wr_data);
kfree(qp->rq.wrid); kfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
err_free: err_free:
kvfree(*in); kvfree(*in);
err_buf: err_buf:
mlx5_buf_free(dev->mdev, &qp->buf); mlx5_buf_free(dev->mdev, &qp->buf);
err_uuar:
free_uuar(&dev->mdev->priv.uuari, uuarn);
return err; return err;
} }
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{ {
mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head); kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list); kfree(qp->sq.w_list);
kfree(qp->sq.wrid); kfree(qp->sq.wrid);
kfree(qp->sq.wr_data); kfree(qp->sq.wr_data);
kfree(qp->rq.wrid); kfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
mlx5_buf_free(dev->mdev, &qp->buf); mlx5_buf_free(dev->mdev, &qp->buf);
free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
} }
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
...@@ -1353,7 +1326,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, ...@@ -1353,7 +1326,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (init_attr->create_flags || init_attr->send_cq) if (init_attr->create_flags || init_attr->send_cq)
return -EINVAL; return -EINVAL;
min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index); min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
if (udata->outlen < min_resp_len) if (udata->outlen < min_resp_len)
return -EINVAL; return -EINVAL;
...@@ -1792,7 +1765,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -1792,7 +1765,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err_create: err_create:
if (qp->create_type == MLX5_QP_USER) if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(pd, qp, base); destroy_qp_user(dev, pd, qp, base);
else if (qp->create_type == MLX5_QP_KERNEL) else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp); destroy_qp_kernel(dev, qp);
...@@ -1970,7 +1943,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) ...@@ -1970,7 +1943,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->create_type == MLX5_QP_KERNEL) if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp); destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER) else if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(&get_pd(qp)->ibpd, qp, base); destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
} }
static const char *ib_qp_type_str(enum ib_qp_type type) static const char *ib_qp_type_str(enum ib_qp_type type)
...@@ -3740,24 +3713,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) ...@@ -3740,24 +3713,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
} }
} }
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
unsigned bytecnt, struct mlx5_ib_qp *qp)
{
while (bytecnt > 0) {
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
__iowrite64_copy(dst++, src++, 8);
bytecnt -= 64;
if (unlikely(src == qp->sq.qend))
src = mlx5_get_send_wqe(qp, 0);
}
}
static u8 get_fence(u8 fence, struct ib_send_wr *wr) static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{ {
if (unlikely(wr->opcode == IB_WR_LOCAL_INV && if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
...@@ -3853,7 +3808,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3853,7 +3808,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
qp = to_mqp(ibqp); qp = to_mqp(ibqp);
bf = qp->bf; bf = &qp->bf;
qend = qp->sq.qend; qend = qp->sq.qend;
spin_lock_irqsave(&qp->sq.lock, flags); spin_lock_irqsave(&qp->sq.lock, flags);
...@@ -4126,28 +4081,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -4126,28 +4081,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* we hit doorbell */ * we hit doorbell */
wmb(); wmb();
if (bf->need_lock) /* currently we support only regular doorbells */
spin_lock(&bf->lock); mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
else /* Make sure doorbells don't leak out of SQ spinlock
__acquire(&bf->lock); * and reach the HCA out of order.
*/
/* TBD enable WC */ mmiowb();
if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
/* wc_wmb(); */
} else {
mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
MLX5_GET_DOORBELL_LOCK(&bf->lock32));
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
*/
mmiowb();
}
bf->offset ^= bf->buf_size; bf->offset ^= bf->buf_size;
if (bf->need_lock)
spin_unlock(&bf->lock);
else
__release(&bf->lock);
} }
spin_unlock_irqrestore(&qp->sq.lock, flags); spin_unlock_irqrestore(&qp->sq.lock, flags);
......
...@@ -179,6 +179,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, ...@@ -179,6 +179,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
cq->cqn); cq->cqn);
cq->uar = dev->priv.uar;
return 0; return 0;
err_cmd: err_cmd:
......
...@@ -465,7 +465,6 @@ struct mlx5e_sq { ...@@ -465,7 +465,6 @@ struct mlx5e_sq {
/* read only */ /* read only */
struct mlx5_wq_cyc wq; struct mlx5_wq_cyc wq;
u32 dma_fifo_mask; u32 dma_fifo_mask;
void __iomem *uar_map;
struct netdev_queue *txq; struct netdev_queue *txq;
u32 sqn; u32 sqn;
u16 bf_buf_size; u16 bf_buf_size;
...@@ -479,7 +478,7 @@ struct mlx5e_sq { ...@@ -479,7 +478,7 @@ struct mlx5e_sq {
/* control path */ /* control path */
struct mlx5_wq_ctrl wq_ctrl; struct mlx5_wq_ctrl wq_ctrl;
struct mlx5_uar uar; struct mlx5_sq_bfreg bfreg;
struct mlx5e_channel *channel; struct mlx5e_channel *channel;
int tc; int tc;
u32 rate_limit; u32 rate_limit;
...@@ -806,7 +805,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, ...@@ -806,7 +805,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{ {
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; u16 ofst = sq->bf_offset;
/* ensure wqe is visible to device before updating doorbell record */ /* ensure wqe is visible to device before updating doorbell record */
dma_wmb(); dma_wmb();
...@@ -818,9 +817,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, ...@@ -818,9 +817,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/ */
wmb(); wmb();
if (bf_sz) if (bf_sz)
__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz); __iowrite64_copy(sq->bfreg.map + ofst, ctrl, bf_sz);
else else
mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL); mlx5_write64((__be32 *)ctrl, sq->bfreg.map + ofst, NULL);
/* flush the write-combining mapped buffer */ /* flush the write-combining mapped buffer */
wmb(); wmb();
...@@ -832,7 +831,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) ...@@ -832,7 +831,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
struct mlx5_core_cq *mcq; struct mlx5_core_cq *mcq;
mcq = &cq->mcq; mcq = &cq->mcq;
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
} }
static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
......
...@@ -89,16 +89,10 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) ...@@ -89,16 +89,10 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
struct mlx5e_resources *res = &mdev->mlx5e_res; struct mlx5e_resources *res = &mdev->mlx5e_res;
int err; int err;
err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false);
if (err) {
mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
return err;
}
err = mlx5_core_alloc_pd(mdev, &res->pdn); err = mlx5_core_alloc_pd(mdev, &res->pdn);
if (err) { if (err) {
mlx5_core_err(mdev, "alloc pd failed, %d\n", err); mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
goto err_unmap_free_uar; return err;
} }
err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn); err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
...@@ -121,9 +115,6 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) ...@@ -121,9 +115,6 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
err_dealloc_pd: err_dealloc_pd:
mlx5_core_dealloc_pd(mdev, res->pdn); mlx5_core_dealloc_pd(mdev, res->pdn);
err_unmap_free_uar:
mlx5_unmap_free_uar(mdev, &res->cq_uar);
return err; return err;
} }
...@@ -134,7 +125,6 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) ...@@ -134,7 +125,6 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_core_destroy_mkey(mdev, &res->mkey); mlx5_core_destroy_mkey(mdev, &res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn); mlx5_core_dealloc_pd(mdev, res->pdn);
mlx5_unmap_free_uar(mdev, &res->cq_uar);
} }
int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev, int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
......
...@@ -967,7 +967,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, ...@@ -967,7 +967,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->channel = c; sq->channel = c;
sq->tc = tc; sq->tc = tc;
err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf)); err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
if (err) if (err)
return err; return err;
...@@ -979,12 +979,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, ...@@ -979,12 +979,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
goto err_unmap_free_uar; goto err_unmap_free_uar;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
if (sq->uar.bf_map) { if (sq->bfreg.wc)
set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state); set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
sq->uar_map = sq->uar.bf_map;
} else {
sq->uar_map = sq->uar.map;
}
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline; sq->max_inline = param->max_inline;
sq->min_inline_mode = sq->min_inline_mode =
...@@ -1012,7 +1009,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, ...@@ -1012,7 +1009,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
mlx5_wq_destroy(&sq->wq_ctrl); mlx5_wq_destroy(&sq->wq_ctrl);
err_unmap_free_uar: err_unmap_free_uar:
mlx5_unmap_free_uar(mdev, &sq->uar); mlx5_free_bfreg(mdev, &sq->bfreg);
return err; return err;
} }
...@@ -1024,7 +1021,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq) ...@@ -1024,7 +1021,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
mlx5e_free_sq_db(sq); mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl); mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar); mlx5_free_bfreg(priv->mdev, &sq->bfreg);
} }
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
...@@ -1058,7 +1055,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) ...@@ -1058,7 +1055,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1); MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index); MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT); MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
...@@ -1216,7 +1213,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, ...@@ -1216,7 +1213,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
mcq->comp = mlx5e_completion_event; mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event; mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn; mcq->irqn = irqn;
mcq->uar = &mdev->mlx5e_res.cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
...@@ -1265,7 +1261,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) ...@@ -1265,7 +1261,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT); MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
...@@ -1677,7 +1673,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, ...@@ -1677,7 +1673,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
{ {
void *cqc = param->cqc; void *cqc = param->cqc;
MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
} }
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
...@@ -2296,7 +2292,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, ...@@ -2296,7 +2292,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
mcq->comp = mlx5e_completion_event; mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event; mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn; mcq->irqn = irqn;
mcq->uar = &mdev->mlx5e_res.cq_uar;
cq->priv = priv; cq->priv = priv;
......
...@@ -512,7 +512,7 @@ static void init_eq_buf(struct mlx5_eq *eq) ...@@ -512,7 +512,7 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, int nent, u64 mask, const char *name,
struct mlx5_uar *uar, enum mlx5_eq_type type) enum mlx5_eq_type type)
{ {
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv; struct mlx5_priv *priv = &dev->priv;
...@@ -556,7 +556,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, ...@@ -556,7 +556,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
MLX5_SET(eqc, eqc, uar_page, uar->index); MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
MLX5_SET(eqc, eqc, intr, vecidx); MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size, MLX5_SET(eqc, eqc, log_page_size,
eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
...@@ -571,7 +571,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, ...@@ -571,7 +571,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = priv->msix_arr[vecidx].vector; eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev; eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
err = request_irq(eq->irqn, handler, 0, err = request_irq(eq->irqn, handler, 0,
priv->irq_info[vecidx].name, eq); priv->irq_info[vecidx].name, eq);
if (err) if (err)
...@@ -686,8 +686,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) ...@@ -686,8 +686,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0], "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
MLX5_EQ_TYPE_ASYNC);
if (err) { if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
return err; return err;
@@ -697,8 +696,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
 				 MLX5_NUM_ASYNC_EQE, async_event_mask,
-				 "mlx5_async_eq", &dev->priv.uuari.uars[0],
-				 MLX5_EQ_TYPE_ASYNC);
+				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
 		goto err1;
@@ -708,7 +706,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 				 MLX5_EQ_VEC_PAGES,
 				 /* TODO: sriov max_vf + */ 1,
 				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
-				 &dev->priv.uuari.uars[0],
 				 MLX5_EQ_TYPE_ASYNC);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
@@ -722,7 +719,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 					 MLX5_NUM_ASYNC_EQE,
 					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 					 "mlx5_page_fault_eq",
-					 &dev->priv.uuari.uars[0],
 					 MLX5_EQ_TYPE_PF);
 		if (err) {
 			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
......
@@ -537,6 +537,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	/* disable cmdif checksum */
 	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
+	/* If the HCA supports 4K UARs use it */
+	if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
+
 	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
 
 	err = set_caps(dev, set_ctx, set_sz,
@@ -759,8 +763,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(dev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 name, &dev->priv.uuari.uars[0],
-					 MLX5_EQ_TYPE_COMP);
+					 name, MLX5_EQ_TYPE_COMP);
 		if (err) {
 			kfree(eq);
 			goto clean;
@@ -920,8 +923,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 		goto out;
 	}
 
-	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
 	err = mlx5_init_cq_table(dev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to initialize cq table\n");
@@ -1100,8 +1101,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 		goto err_cleanup_once;
 	}
 
-	err = mlx5_alloc_uuars(dev, &priv->uuari);
-	if (err) {
+	dev->priv.uar = mlx5_get_uars_page(dev);
+	if (!dev->priv.uar) {
 		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
 		goto err_disable_msix;
 	}
@@ -1109,7 +1110,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	err = mlx5_start_eqs(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
-		goto err_free_uar;
+		goto err_put_uars;
 	}
 
 	err = alloc_comp_eqs(dev);
@@ -1175,8 +1176,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 err_stop_eqs:
 	mlx5_stop_eqs(dev);
 
-err_free_uar:
-	mlx5_free_uuars(dev, &priv->uuari);
+err_put_uars:
+	mlx5_put_uars_page(dev, priv->uar);
 
 err_disable_msix:
 	mlx5_disable_msix(dev);
@@ -1238,7 +1239,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	mlx5_irq_clear_affinity_hints(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
-	mlx5_free_uuars(dev, &priv->uuari);
+	mlx5_put_uars_page(dev, priv->uar);
 	mlx5_disable_msix(dev);
 	if (cleanup)
 		mlx5_cleanup_once(dev);
@@ -1313,6 +1314,11 @@ static int init_one(struct pci_dev *pdev,
 		goto clean_dev;
 	}
 #endif
+	mutex_init(&priv->bfregs.reg_head.lock);
+	mutex_init(&priv->bfregs.wc_head.lock);
+	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
+	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
+
 	err = mlx5_pci_init(dev, priv);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
......
@@ -37,11 +37,6 @@
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
 
-enum {
-	NUM_DRIVER_UARS		= 4,
-	NUM_LOW_LAT_UUARS	= 4,
-};
-
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
 {
 	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
@@ -67,167 +62,269 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
 }
 EXPORT_SYMBOL(mlx5_cmd_free_uar);
 
-static int need_uuar_lock(int uuarn)
+static int uars_per_sys_page(struct mlx5_core_dev *mdev)
 {
-	int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
-
-	if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
-		return 0;
+	if (MLX5_CAP_GEN(mdev, uar_4k))
+		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
 
 	return 1;
 }
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{ {
int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; u32 system_page_index;
struct mlx5_bf *bf;
phys_addr_t addr; if (MLX5_CAP_GEN(mdev, uar_4k))
int err; system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
else
system_page_index = index;
return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
}
static void up_rel_func(struct kref *kref)
{
struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
list_del(&up->list);
if (mlx5_cmd_free_uar(up->mdev, up->index))
mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
kfree(up->reg_bitmap);
kfree(up->fp_bitmap);
kfree(up);
}
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
bool map_wc)
{
struct mlx5_uars_page *up;
int err = -ENOMEM;
phys_addr_t pfn;
int bfregs;
int i; int i;
uuari->num_uars = NUM_DRIVER_UARS; bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; up = kzalloc(sizeof(*up), GFP_KERNEL);
if (!up)
return ERR_PTR(err);
mutex_init(&uuari->lock); up->mdev = mdev;
uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
if (!uuari->uars) if (!up->reg_bitmap)
return -ENOMEM; goto error1;
uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
if (!uuari->bfs) { if (!up->fp_bitmap)
err = -ENOMEM; goto error1;
goto out_uars;
}
uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), for (i = 0; i < bfregs; i++)
GFP_KERNEL); if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
if (!uuari->bitmap) { set_bit(i, up->reg_bitmap);
err = -ENOMEM; else
goto out_bfs; set_bit(i, up->fp_bitmap);
}
uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); up->bfregs = bfregs;
if (!uuari->count) { up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
err = -ENOMEM; up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
goto out_bitmap;
}
for (i = 0; i < uuari->num_uars; i++) { err = mlx5_cmd_alloc_uar(mdev, &up->index);
err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); if (err) {
if (err) mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
goto out_count; goto error1;
}
addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); pfn = uar2pfn(mdev, up->index);
uuari->uars[i].map = ioremap(addr, PAGE_SIZE); if (map_wc) {
if (!uuari->uars[i].map) { up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
mlx5_cmd_free_uar(dev, uuari->uars[i].index); if (!up->map) {
err = -EAGAIN;
goto error2;
}
} else {
up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!up->map) {
err = -ENOMEM; err = -ENOMEM;
goto out_count; goto error2;
} }
mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
uuari->uars[i].index, uuari->uars[i].map);
}
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->reg = NULL; /* Add WC support */
bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
(1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
bf->uuarn = i;
} }
kref_init(&up->ref_count);
mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
up->index, up->bfregs);
return up;
error2:
if (mlx5_cmd_free_uar(mdev, up->index))
mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
kfree(up->fp_bitmap);
kfree(up->reg_bitmap);
kfree(up);
return ERR_PTR(err);
}
return 0; struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
out_count: struct mlx5_uars_page *ret;
for (i--; i >= 0; i--) {
iounmap(uuari->uars[i].map); mutex_lock(&mdev->priv.bfregs.reg_head.lock);
mlx5_cmd_free_uar(dev, uuari->uars[i].index); if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
ret = alloc_uars_page(mdev, false);
if (IS_ERR(ret)) {
ret = NULL;
goto out;
}
list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
} else {
ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
struct mlx5_uars_page, list);
kref_get(&ret->ref_count);
} }
kfree(uuari->count); out:
mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
out_bitmap: return ret;
kfree(uuari->bitmap); }
EXPORT_SYMBOL(mlx5_get_uars_page);
out_bfs:
kfree(uuari->bfs);
out_uars: void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
kfree(uuari->uars); {
return err; mutex_lock(&mdev->priv.bfregs.reg_head.lock);
kref_put(&up->ref_count, up_rel_func);
mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
} }
EXPORT_SYMBOL(mlx5_put_uars_page);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{ {
int i = uuari->num_uars; /* return the offset in bytes from the start of the page to the
* blue flame area of the UAR
*/
return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
(dbi % MLX5_BFREGS_PER_UAR) *
(1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}
for (i--; i >= 0; i--) { static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
iounmap(uuari->uars[i].map); bool map_wc, bool fast_path)
mlx5_cmd_free_uar(dev, uuari->uars[i].index); {
struct mlx5_bfreg_data *bfregs;
struct mlx5_uars_page *up;
struct list_head *head;
unsigned long *bitmap;
unsigned int *avail;
struct mutex *lock; /* pointer to right mutex */
int dbi;
bfregs = &mdev->priv.bfregs;
if (map_wc) {
head = &bfregs->wc_head.list;
lock = &bfregs->wc_head.lock;
} else {
head = &bfregs->reg_head.list;
lock = &bfregs->reg_head.lock;
} }
mutex_lock(lock);
kfree(uuari->count); if (list_empty(head)) {
kfree(uuari->bitmap); up = alloc_uars_page(mdev, map_wc);
kfree(uuari->bfs); if (IS_ERR(up)) {
kfree(uuari->uars); mutex_unlock(lock);
return PTR_ERR(up);
}
list_add(&up->list, head);
} else {
up = list_entry(head->next, struct mlx5_uars_page, list);
kref_get(&up->ref_count);
}
if (fast_path) {
bitmap = up->fp_bitmap;
avail = &up->fp_avail;
} else {
bitmap = up->reg_bitmap;
avail = &up->reg_avail;
}
dbi = find_first_bit(bitmap, up->bfregs);
clear_bit(dbi, bitmap);
(*avail)--;
if (!(*avail))
list_del(&up->list);
bfreg->map = up->map + map_offset(mdev, dbi);
bfreg->up = up;
bfreg->wc = map_wc;
bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
mutex_unlock(lock);
return 0; return 0;
} }
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc) bool map_wc, bool fast_path)
{ {
phys_addr_t pfn;
phys_addr_t uar_bar_start;
int err; int err;
err = mlx5_cmd_alloc_uar(mdev, &uar->index); err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
if (err) { if (!err)
mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); return 0;
return err;
}
uar_bar_start = pci_resource_start(mdev->pdev, 0); if (err == -EAGAIN && map_wc)
pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; return alloc_bfreg(mdev, bfreg, false, fast_path);
if (map_wc) { return err;
uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); }
if (!uar->bf_map) { EXPORT_SYMBOL(mlx5_alloc_bfreg);
mlx5_core_warn(mdev, "ioremap_wc() failed\n");
uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!uar->map)
goto err_free_uar;
}
} else {
uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!uar->map)
goto err_free_uar;
}
return 0; static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
struct mlx5_uars_page *up,
struct mlx5_sq_bfreg *bfreg)
{
unsigned int uar_idx;
unsigned int bfreg_idx;
unsigned int bf_reg_size;
err_free_uar: bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
mlx5_core_warn(mdev, "ioremap() failed\n");
err = -ENOMEM;
mlx5_cmd_free_uar(mdev, uar->index);
return err; uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
} }
EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{ {
if (uar->map) struct mlx5_bfreg_data *bfregs;
iounmap(uar->map); struct mlx5_uars_page *up;
else struct mutex *lock; /* pointer to right mutex */
iounmap(uar->bf_map); unsigned int dbi;
mlx5_cmd_free_uar(mdev, uar->index); bool fp;
unsigned int *avail;
unsigned long *bitmap;
struct list_head *head;
bfregs = &mdev->priv.bfregs;
if (bfreg->wc) {
head = &bfregs->wc_head.list;
lock = &bfregs->wc_head.lock;
} else {
head = &bfregs->reg_head.list;
lock = &bfregs->reg_head.lock;
}
up = bfreg->up;
dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
if (fp) {
avail = &up->fp_avail;
bitmap = up->fp_bitmap;
} else {
avail = &up->reg_avail;
bitmap = up->reg_bitmap;
}
mutex_lock(lock);
(*avail)++;
set_bit(dbi, bitmap);
if (*avail == 1)
list_add_tail(&up->list, head);
kref_put(&up->ref_count, up_rel_func);
mutex_unlock(lock);
} }
EXPORT_SYMBOL(mlx5_unmap_free_uar); EXPORT_SYMBOL(mlx5_free_bfreg);
@@ -42,13 +42,13 @@ struct mlx5_core_cq {
 	int			cqe_sz;
 	__be32		       *set_ci_db;
 	__be32		       *arm_db;
+	struct mlx5_uars_page  *uar;
 	atomic_t		refcount;
 	struct completion	free;
 	unsigned		vector;
 	unsigned int		irqn;
 	void (*comp)		(struct mlx5_core_cq *);
 	void (*event)		(struct mlx5_core_cq *, enum mlx5_event);
-	struct mlx5_uar	       *uar;
 	u32			cons_index;
 	unsigned		arm_sn;
 	struct mlx5_rsc_debug	*dbg;
@@ -144,7 +144,6 @@ enum {
 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
 			       void __iomem *uar_page,
-			       spinlock_t *doorbell_lock,
 			       u32 cons_index)
 {
 	__be32 doorbell[2];
@@ -164,7 +163,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
 	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
 	doorbell[1] = cpu_to_be32(cq->cqn);
 
-	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
+	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
 }
 
 int mlx5_init_cq_table(struct mlx5_core_dev *dev);
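A brief illustration of the simplified arm path, assuming "mdev" and "cq" are already in scope: because mlx5_write64() now tolerates a NULL lock, callers no longer pass a doorbell spinlock, and the UAR page comes from the shared driver UARs page.

/* Sketch: arming a CQ after this change; no doorbell spinlock argument.
 * MLX5_CQ_DB_REQ_NOT is the existing request-notification command value.
 */
void __iomem *uar_page = mdev->priv.uar->map;

mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, uar_page, cq->mcq.cons_index);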
......
@@ -212,10 +212,20 @@ enum {
 };
 
 enum {
-	MLX5_BF_REGS_PER_PAGE		= 4,
-	MLX5_MAX_UAR_PAGES		= 1 << 8,
-	MLX5_NON_FP_BF_REGS_PER_PAGE	= 2,
-	MLX5_MAX_UUARS	= MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
+	MLX5_ADAPTER_PAGE_SHIFT		= 12,
+	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
+};
+
+enum {
+	MLX5_BFREGS_PER_UAR		= 4,
+	MLX5_MAX_UARS			= 1 << 8,
+	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
+	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
+					  MLX5_NON_FP_BFREGS_PER_UAR,
+	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
+					  MLX5_NON_FP_BFREGS_PER_UAR,
+	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
+	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
 };
 
 enum {
@@ -388,11 +398,6 @@ enum {
 	MLX5_MAX_PAGE_SHIFT		= 31
 };
 
-enum {
-	MLX5_ADAPTER_PAGE_SHIFT		= 12,
-	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
-};
-
 enum {
 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
 };
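The new constants are what make 4K UARs pay off on large-page systems. A quick worked example (illustration only; the values follow directly from the enum definitions above):

/* Illustration only: capacity per system page implied by the enums above.
 * These are compile-time arithmetic checks, not code from the patch.
 */
static const unsigned int uars_per_4k_page   = 4096  / MLX5_ADAPTER_PAGE_SIZE; /* 1  */
static const unsigned int uars_per_64k_page  = 65536 / MLX5_ADAPTER_PAGE_SIZE; /* 16 */
static const unsigned int bfregs_per_64k_page =
	MLX5_NON_FP_BFREGS_PER_UAR * (65536 / MLX5_ADAPTER_PAGE_SIZE);          /* 32 */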
......
@@ -68,10 +68,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(doorbell_lock, flags);
+	if (doorbell_lock)
+		spin_lock_irqsave(doorbell_lock, flags);
 	__raw_writel((__force u32) val[0], dest);
 	__raw_writel((__force u32) val[1], dest + 4);
-	spin_unlock_irqrestore(doorbell_lock, flags);
+	if (doorbell_lock)
+		spin_unlock_irqrestore(doorbell_lock, flags);
 }
 
 #endif
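With the lock now optional, callers that write the doorbell atomically or serialize elsewhere simply pass NULL. A hedged sketch of both call styles; db[], uar_page, hi_word, lo_word and my_lock are placeholder names, not code from this series:

/* Sketch: both call styles are supported after this change. */
__be32 db[2] = { cpu_to_be32(hi_word), cpu_to_be32(lo_word) };

mlx5_write64(db, uar_page + MLX5_CQ_DOORBELL, NULL);      /* lockless path      */
mlx5_write64(db, uar_page + MLX5_CQ_DOORBELL, &my_lock);  /* serialized variant */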
......
@@ -187,36 +187,18 @@ enum mlx5_eq_type {
 #endif
 };
 
-struct mlx5_uuar_info {
-	struct mlx5_uar	       *uars;
-	int			num_uars;
-	int			num_low_latency_uuars;
-	unsigned long	       *bitmap;
+struct mlx5_bfreg_info {
+	u32		       *sys_pages;
+	int			num_low_latency_bfregs;
 	unsigned int	       *count;
-	struct mlx5_bf	       *bfs;
 
 	/*
-	 * protect uuar allocation data structs
+	 * protect bfreg allocation data structs
 	 */
 	struct mutex		lock;
 	u32			ver;
-};
-
-struct mlx5_bf {
-	void __iomem	       *reg;
-	void __iomem	       *regreg;
-	int			buf_size;
-	struct mlx5_uar	       *uar;
-	unsigned long		offset;
-	int			need_lock;
-	/* protect blue flame buffer selection when needed
-	 */
-	spinlock_t		lock;
-	/* serialize 64 bit writes when done as two 32 bit accesses
-	 */
-	spinlock_t		lock32;
-	int			uuarn;
+	bool			lib_uar_4k;
+	u32			num_sys_pages;
 };
 
 struct mlx5_cmd_first {
@@ -451,14 +433,38 @@ struct mlx5_eq_table {
 	spinlock_t		lock;
 };
 
-struct mlx5_uar {
-	u32			index;
-	struct list_head	bf_list;
-	unsigned		free_bf_bmap;
-	void __iomem	       *bf_map;
+struct mlx5_uars_page {
 	void __iomem	       *map;
+	bool			wc;
+	u32			index;
+	struct list_head	list;
+	unsigned int		bfregs;
+	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
+	unsigned long	       *fp_bitmap;
+	unsigned int		reg_avail;
+	unsigned int		fp_avail;
+	struct kref		ref_count;
+	struct mlx5_core_dev   *mdev;
 };
 
+struct mlx5_bfreg_head {
+	/* protect blue flame registers allocations */
+	struct mutex		lock;
+	struct list_head	list;
+};
+
+struct mlx5_bfreg_data {
+	struct mlx5_bfreg_head	reg_head;
+	struct mlx5_bfreg_head	wc_head;
+};
+
+struct mlx5_sq_bfreg {
+	void __iomem	       *map;
+	struct mlx5_uars_page  *up;
+	bool			wc;
+	u32			index;
+	unsigned int		offset;
+};
 
 struct mlx5_core_health {
 	struct health_buffer __iomem *health;
@@ -578,8 +584,6 @@ struct mlx5_priv {
 	struct mlx5_eq_table	eq_table;
 	struct msix_entry	*msix_arr;
 	struct mlx5_irq_info	*irq_info;
-	struct mlx5_uuar_info	uuari;
-	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
 	/* pages stuff */
 	struct workqueue_struct *pg_wq;
@@ -644,6 +648,8 @@ struct mlx5_priv {
 	void		       *pfault_ctx;
 	struct srcu_struct	pfault_srcu;
 #endif
+	struct mlx5_bfreg_data		bfregs;
+	struct mlx5_uars_page	       *uar;
 };
 
 enum mlx5_device_state {
@@ -712,7 +718,6 @@ struct mlx5_td {
 };
 
 struct mlx5e_resources {
-	struct mlx5_uar            cq_uar;
 	u32                        pdn;
 	struct mlx5_td             td;
 	struct mlx5_core_mkey      mkey;
@@ -902,11 +907,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
-		       bool map_wc);
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -972,7 +972,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name,
-		       struct mlx5_uar *uar, enum mlx5_eq_type type);
+		       enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
@@ -1021,6 +1021,9 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
 int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+		     bool map_wc, bool fast_path);
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
 
 static inline int fw_initializing(struct mlx5_core_dev *dev)
 {
@@ -1080,6 +1083,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
 
 struct mlx5_profile {
 	u64	mask;
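The mlx5_alloc_bfreg()/mlx5_free_bfreg() prototypes above are the kernel-consumer entry points to the new blue flame register allocator. A minimal sketch of how a send-queue producer might use them; the "my_sq" structure, its fields and the surrounding error handling are assumptions for illustration, not code from this series:

/* Sketch only: allocate a write-combining, non-fast-path blue flame register,
 * hand its window and UAR index to a hypothetical SQ, and release it later.
 */
static int example_sq_bfreg(struct mlx5_core_dev *mdev, struct my_sq *sq)
{
	struct mlx5_sq_bfreg bfreg;
	int err;

	err = mlx5_alloc_bfreg(mdev, &bfreg, true /* map_wc */, false /* fast_path */);
	if (err)
		return err;

	sq->uar_map     = bfreg.map;	/* doorbell / blue flame window       */
	sq->bfreg_index = bfreg.index;	/* UAR index to program into the SQC  */
	sq->bfreg       = bfreg;	/* keep it so it can be freed later   */

	return 0;
	/* ... and on teardown: mlx5_free_bfreg(mdev, &sq->bfreg); */
}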
......
@@ -905,7 +905,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         uc[0x1];
 	u8         rc[0x1];
 
-	u8         reserved_at_240[0xa];
+	u8         uar_4k[0x1];
+	u8         reserved_at_241[0x9];
 	u8         uar_sz[0x6];
 	u8         reserved_at_250[0x8];
 	u8         log_pg_sz[0x8];
@@ -997,7 +998,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         device_frequency_mhz[0x20];
 	u8         device_frequency_khz[0x20];
 
-	u8         reserved_at_500[0x80];
+	u8         reserved_at_500[0x20];
+	u8         num_of_uars_per_page[0x20];
+	u8         reserved_at_540[0x40];
 
 	u8         reserved_at_580[0x3f];
 	u8         cqe_compression[0x1];
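These two capability fields are what the core driver consults when sizing UAR allocations; the check below mirrors the uars_per_sys_page() helper added to uar.c earlier in this diff and is reproduced here only to show how the bits are consumed.

/* How the new capability fields are consumed (mirrors uars_per_sys_page()
 * from drivers/net/ethernet/mellanox/mlx5/core/uar.c in this series).
 */
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, uar_4k))
		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

	return 1;	/* firmware without uar_4k: one UAR per system page */
}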
......
@@ -61,19 +61,24 @@ enum {
  */
 
 struct mlx5_ib_alloc_ucontext_req {
-	__u32	total_num_uuars;
-	__u32	num_low_latency_uuars;
+	__u32	total_num_bfregs;
+	__u32	num_low_latency_bfregs;
+};
+
+enum mlx5_lib_caps {
+	MLX5_LIB_CAP_4K_UAR	= (u64)1 << 0,
 };
 
 struct mlx5_ib_alloc_ucontext_req_v2 {
-	__u32	total_num_uuars;
-	__u32	num_low_latency_uuars;
+	__u32	total_num_bfregs;
+	__u32	num_low_latency_bfregs;
 	__u32	flags;
 	__u32	comp_mask;
 	__u8	max_cqe_version;
 	__u8	reserved0;
 	__u16	reserved1;
 	__u32	reserved2;
+	__u64	lib_caps;
 };
 
 enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -88,7 +93,7 @@ enum mlx5_user_cmds_supp_uhw {
 struct mlx5_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
 	__u32	bf_reg_size;
-	__u32	tot_uuars;
+	__u32	tot_bfregs;
 	__u32	cache_line_size;
 	__u16	max_sq_desc_sz;
 	__u16	max_rq_desc_sz;
@@ -103,6 +108,8 @@ struct mlx5_ib_alloc_ucontext_resp {
 	__u8	cmds_supp_uhw;
 	__u16	reserved2;
 	__u64	hca_core_clock_offset;
+	__u32	log_uar_size;
+	__u32	num_uars_per_page;
 };
 
 struct mlx5_ib_alloc_pd_resp {
@@ -241,7 +248,7 @@ struct mlx5_ib_create_qp_rss {
 };
 
 struct mlx5_ib_create_qp_resp {
-	__u32	uuar_index;
+	__u32	bfreg_index;
 };
 
 struct mlx5_ib_alloc_mw {
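To negotiate the new layout, user space advertises 4K-UAR support through lib_caps in the v2 context request; the kernel answers with log_uar_size and num_uars_per_page in the response. A hedged sketch of what a library such as libmlx5 might send; every value other than lib_caps is illustrative:

/* Sketch: user-space side of the compatibility negotiation. */
struct mlx5_ib_alloc_ucontext_req_v2 req = {
	.total_num_bfregs	= 8,	/* default per-context request, illustrative */
	.num_low_latency_bfregs	= 2,	/* illustrative */
	.max_cqe_version	= 1,	/* illustrative */
	.lib_caps		= MLX5_LIB_CAP_4K_UAR,
};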
......