Commit 27827786 authored by Saeed Mahameed, committed by Leon Romanovsky

{net,IB}/mlx5: CQ commands via mlx5 ifc

Remove old representation of manually created CQ commands layout,
and use mlx5_ifc canonical structures and defines.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 73b626c1
...@@ -747,14 +747,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, ...@@ -747,14 +747,16 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq, struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, struct mlx5_create_cq_mbox_in **cqb, int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen) int *cqe_size, int *index, int *inlen)
{ {
struct mlx5_ib_create_cq ucmd; struct mlx5_ib_create_cq ucmd;
size_t ucmdlen; size_t ucmdlen;
int page_shift; int page_shift;
__be64 *pas;
int npages; int npages;
int ncont; int ncont;
void *cqc;
int err; int err;
ucmdlen = ucmdlen =
...@@ -792,14 +794,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, ...@@ -792,14 +794,20 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont; *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
*cqb = mlx5_vzalloc(*inlen); *cqb = mlx5_vzalloc(*inlen);
if (!*cqb) { if (!*cqb) {
err = -ENOMEM; err = -ENOMEM;
goto err_db; goto err_db;
} }
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
(*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = to_mucontext(context)->uuari.uars[0].index; *index = to_mucontext(context)->uuari.uars[0].index;
...@@ -834,9 +842,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) ...@@ -834,9 +842,10 @@ static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size, int entries, int cqe_size,
struct mlx5_create_cq_mbox_in **cqb, u32 **cqb, int *index, int *inlen)
int *index, int *inlen)
{ {
__be64 *pas;
void *cqc;
int err; int err;
err = mlx5_db_alloc(dev->mdev, &cq->db); err = mlx5_db_alloc(dev->mdev, &cq->db);
...@@ -853,15 +862,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, ...@@ -853,15 +862,21 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
init_cq_buf(cq, &cq->buf); init_cq_buf(cq, &cq->buf);
*inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
*cqb = mlx5_vzalloc(*inlen); *cqb = mlx5_vzalloc(*inlen);
if (!*cqb) { if (!*cqb) {
err = -ENOMEM; err = -ENOMEM;
goto err_buf; goto err_buf;
} }
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
mlx5_fill_page_array(&cq->buf.buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uuari.uars[0].index; *index = dev->mdev->priv.uuari.uars[0].index;
return 0; return 0;
...@@ -895,11 +910,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, ...@@ -895,11 +910,12 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
{ {
int entries = attr->cqe; int entries = attr->cqe;
int vector = attr->comp_vector; int vector = attr->comp_vector;
struct mlx5_create_cq_mbox_in *cqb = NULL;
struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq; struct mlx5_ib_cq *cq;
int uninitialized_var(index); int uninitialized_var(index);
int uninitialized_var(inlen); int uninitialized_var(inlen);
u32 *cqb = NULL;
void *cqc;
int cqe_size; int cqe_size;
unsigned int irqn; unsigned int irqn;
int eqn; int eqn;
...@@ -945,19 +961,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, ...@@ -945,19 +961,20 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler); INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
} }
cq->cqe_size = cqe_size;
cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
cqb->ctx.cqe_sz_flags |= (1 << 1);
cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn); err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
if (err) if (err)
goto err_cqb; goto err_cqb;
cqb->ctx.c_eqn = cpu_to_be16(eqn); cq->cqe_size = cqe_size;
cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
if (err) if (err)
...@@ -1088,27 +1105,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) ...@@ -1088,27 +1105,15 @@ void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{ {
struct mlx5_modify_cq_mbox_in *in;
struct mlx5_ib_dev *dev = to_mdev(cq->device); struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq); struct mlx5_ib_cq *mcq = to_mcq(cq);
int err; int err;
u32 fsel;
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation)) if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS; return -ENOSYS;
in = kzalloc(sizeof(*in), GFP_KERNEL); err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
if (!in) cq_period, cq_count);
return -ENOMEM;
in->cqn = cpu_to_be32(mcq->mcq.cqn);
fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
in->ctx.cq_period = cpu_to_be16(cq_period);
in->ctx.cq_max_count = cpu_to_be16(cq_count);
in->field_select = cpu_to_be32(fsel);
err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
kfree(in);
if (err) if (err)
mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn); mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
...@@ -1241,9 +1246,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) ...@@ -1241,9 +1246,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{ {
struct mlx5_ib_dev *dev = to_mdev(ibcq->device); struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
struct mlx5_ib_cq *cq = to_mcq(ibcq); struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_modify_cq_mbox_in *in; void *cqc;
u32 *in;
int err; int err;
int npas; int npas;
__be64 *pas;
int page_shift; int page_shift;
int inlen; int inlen;
int uninitialized_var(cqe_size); int uninitialized_var(cqe_size);
...@@ -1285,28 +1292,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) ...@@ -1285,28 +1292,37 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
if (err) if (err)
goto ex; goto ex;
inlen = sizeof(*in) + npas * sizeof(in->pas[0]); inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
in = mlx5_vzalloc(inlen); in = mlx5_vzalloc(inlen);
if (!in) { if (!in) {
err = -ENOMEM; err = -ENOMEM;
goto ex_resize; goto ex_resize;
} }
pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
if (udata) if (udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
in->pas, 0); pas, 0);
else else
mlx5_fill_page_array(&cq->resize_buf->buf, in->pas); mlx5_fill_page_array(&cq->resize_buf->buf, pas);
in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE | MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.resize_field_select.resize_field_select,
MLX5_MODIFY_CQ_MASK_LOG_SIZE |
MLX5_MODIFY_CQ_MASK_PG_OFFSET | MLX5_MODIFY_CQ_MASK_PG_OFFSET |
MLX5_MODIFY_CQ_MASK_PG_SIZE); MLX5_MODIFY_CQ_MASK_PG_SIZE);
in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
in->ctx.page_offset = 0;
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24); MLX5_SET(cqc, cqc, log_page_size,
in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE); page_shift - MLX5_ADAPTER_PAGE_SHIFT);
in->cqn = cpu_to_be32(cq->mcq.cqn); MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err) if (err)
......
...@@ -134,33 +134,30 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) ...@@ -134,33 +134,30 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
complete(&cq->free); complete(&cq->free);
} }
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen) u32 *in, int inlen)
{ {
int err;
struct mlx5_cq_table *table = &dev->priv.cq_table; struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_create_cq_mbox_out out; u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_destroy_cq_mbox_in din; u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
struct mlx5_destroy_cq_mbox_out dout; u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn); c_eqn);
struct mlx5_eq *eq; struct mlx5_eq *eq;
int err;
eq = mlx5_eqn2eq(dev, eqn); eq = mlx5_eqn2eq(dev, eqn);
if (IS_ERR(eq)) if (IS_ERR(eq))
return PTR_ERR(eq); return PTR_ERR(eq);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); memset(out, 0, sizeof(out));
memset(&out, 0, sizeof(out)); MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err = err ? : mlx5_cmd_status_to_err_v2(out);
if (err) if (err)
return err; return err;
if (out.hdr.status) cq->cqn = MLX5_GET(create_cq_out, out, cqn);
return mlx5_cmd_status_to_err(&out.hdr);
cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
cq->cons_index = 0; cq->cons_index = 0;
cq->arm_sn = 0; cq->arm_sn = 0;
atomic_set(&cq->refcount, 1); atomic_set(&cq->refcount, 1);
...@@ -186,19 +183,21 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, ...@@ -186,19 +183,21 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0; return 0;
err_cmd: err_cmd:
memset(&din, 0, sizeof(din)); memset(din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout)); memset(dout, 0, sizeof(dout));
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
return err; err = mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err ? : mlx5_cmd_status_to_err_v2(out);
} }
EXPORT_SYMBOL(mlx5_core_create_cq); EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{ {
struct mlx5_cq_table *table = &dev->priv.cq_table; struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_destroy_cq_mbox_in in; u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
struct mlx5_destroy_cq_mbox_out out; u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
struct mlx5_core_cq *tmp; struct mlx5_core_cq *tmp;
int err; int err;
...@@ -214,17 +213,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) ...@@ -214,17 +213,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
return -EINVAL; return -EINVAL;
} }
memset(&in, 0, sizeof(in)); MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
memset(&out, 0, sizeof(out)); MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
in.cqn = cpu_to_be32(cq->cqn); err = err ? : mlx5_cmd_status_to_err_v2(out);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err) if (err)
return err; return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
synchronize_irq(cq->irqn); synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq); mlx5_debug_cq_remove(dev, cq);
...@@ -237,44 +232,28 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) ...@@ -237,44 +232,28 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq); EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out) u32 *out, int outlen)
{ {
struct mlx5_query_cq_mbox_in in; u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
int err; int err;
memset(&in, 0, sizeof(in)); MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
memset(out, 0, sizeof(*out)); MLX5_SET(query_cq_in, in, cqn, cq->cqn);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err; err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
return err ? : mlx5_cmd_status_to_err_v2(out);
} }
EXPORT_SYMBOL(mlx5_core_query_cq); EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz) u32 *in, int inlen)
{ {
struct mlx5_modify_cq_mbox_out out; u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
int err; int err;
memset(&out, 0, sizeof(out)); MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); return err ? : mlx5_cmd_status_to_err_v2(out);
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return 0;
} }
EXPORT_SYMBOL(mlx5_core_modify_cq); EXPORT_SYMBOL(mlx5_core_modify_cq);
...@@ -283,18 +262,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, ...@@ -283,18 +262,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period, u16 cq_period,
u16 cq_max_count) u16 cq_max_count)
{ {
struct mlx5_modify_cq_mbox_in in; u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
void *cqc;
memset(&in, 0, sizeof(in));
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
in.cqn = cpu_to_be32(cq->cqn); cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
in.ctx.cq_period = cpu_to_be16(cq_period); MLX5_SET(cqc, cqc, cq_period, cq_period);
in.ctx.cq_max_count = cpu_to_be16(cq_max_count); MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | MLX5_SET(modify_cq_in, in,
MLX5_CQ_MODIFY_COUNT); modify_field_select_resize_field_select.modify_field_select.modify_field_select,
MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
} }
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
int mlx5_init_cq_table(struct mlx5_core_dev *dev) int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{ {
......
...@@ -395,37 +395,37 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, ...@@ -395,37 +395,37 @@ static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int index) int index)
{ {
struct mlx5_query_cq_mbox_out *out; int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
struct mlx5_cq_context *ctx;
u64 param = 0; u64 param = 0;
void *ctx;
u32 *out;
int err; int err;
out = kzalloc(sizeof(*out), GFP_KERNEL); out = mlx5_vzalloc(outlen);
if (!out) if (!out)
return param; return param;
ctx = &out->ctx; err = mlx5_core_query_cq(dev, cq, out, outlen);
err = mlx5_core_query_cq(dev, cq, out);
if (err) { if (err) {
mlx5_core_warn(dev, "failed to query cq\n"); mlx5_core_warn(dev, "failed to query cq\n");
goto out; goto out;
} }
ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
switch (index) { switch (index) {
case CQ_PID: case CQ_PID:
param = cq->pid; param = cq->pid;
break; break;
case CQ_NUM_CQES: case CQ_NUM_CQES:
param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
break; break;
case CQ_LOG_PG_SZ: case CQ_LOG_PG_SZ:
param = (ctx->log_pg_sz & 0x1f) + 12; param = MLX5_GET(cqc, ctx, log_page_size);
break; break;
} }
out: out:
kfree(out); kvfree(out);
return param; return param;
} }
......
...@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, ...@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev); int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev); void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen); u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out); u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz); u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period, struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count); u16 cq_max_count);
......
...@@ -899,82 +899,6 @@ struct mlx5_arm_srq_mbox_out { ...@@ -899,82 +899,6 @@ struct mlx5_arm_srq_mbox_out {
u8 rsvd[8]; u8 rsvd[8];
}; };
struct mlx5_cq_context {
u8 status;
u8 cqe_sz_flags;
u8 st;
u8 rsvd3;
u8 rsvd4[6];
__be16 page_offset;
__be32 log_sz_usr_page;
__be16 cq_period;
__be16 cq_max_count;
__be16 rsvd20;
__be16 c_eqn;
u8 log_pg_sz;
u8 rsvd25[7];
__be32 last_notified_index;
__be32 solicit_producer_index;
__be32 consumer_counter;
__be32 producer_counter;
u8 rsvd48[8];
__be64 db_record_addr;
};
struct mlx5_create_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_cqn;
u8 rsvdx[4];
struct mlx5_cq_context ctx;
u8 rsvd6[192];
__be64 pas[0];
};
struct mlx5_create_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_destroy_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_destroy_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_query_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_query_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_cq_context ctx;
u8 rsvd6[16];
__be64 pas[0];
};
struct mlx5_modify_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
__be32 field_select;
struct mlx5_cq_context ctx;
u8 rsvd[192];
__be64 pas[0];
};
struct mlx5_modify_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_enable_hca_mbox_in { struct mlx5_enable_hca_mbox_in {
struct mlx5_inbox_hdr hdr; struct mlx5_inbox_hdr hdr;
u8 rsvd[8]; u8 rsvd[8];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment