Commit ed8ada39 authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull infiniband updates from Roland Dreier:
 "Last batch of IB changes for 3.12: many mlx5 hardware driver fixes
  plus one trivial semicolon cleanup"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB: Remove unnecessary semicolons
  IB/mlx5: Ensure proper synchronization accessing memory
  IB/mlx5: Fix alignment of reg umr gather buffers
  IB/mlx5: Fix eq names to display nicely in /proc/interrupts
  mlx5: Fix error code translation from firmware to driver
  IB/mlx5: Fix opt param mask according to firmware spec
  mlx5: Fix opt param mask for sq err to rts transition
  IB/mlx5: Disable atomic operations
  mlx5: Fix layout of struct mlx5_init_seg
  mlx5: Keep polling to reclaim pages while any returned
  IB/mlx5: Avoid async events on invalid port number
  IB/mlx5: Decrease memory consumption of mr caches
  mlx5: Remove checksum on command interface commands
  IB/mlx5: Fix memory leak in mlx5_ib_create_srq
  IB/mlx5: Flush cache workqueue before destroying it
  IB/mlx5: Fix send work queue size calculation
parents d6099aeb 59b5b28d
......@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
return "C2_QP_STATE_ERROR";
default:
return "<invalid QP state>";
- };
+ }
}
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
......
......@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
{
struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+ char name[MLX5_MAX_EQ_NAME];
struct mlx5_eq *eq, *n;
int ncomp_vec;
int nent;
......@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
goto clean;
}
- snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(&dev->mdev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
- eq->name,
- &dev->mdev.priv.uuari.uars[0]);
+ name, &dev->mdev.priv.uuari.uars[0]);
if (err) {
kfree(eq);
goto clean;
......@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len = (unsigned int)-1;
props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
- props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
- IB_ATOMIC_HCA : IB_ATOMIC_NONE;
- props->masked_atomic_cap = IB_ATOMIC_HCA;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
......@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
ibev.device = &ibdev->ib_dev;
ibev.element.port_num = port;
+ if (port < 1 || port > ibdev->num_ports) {
+ mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+ return;
+ }
if (ibdev->ib_active)
ib_dispatch_event(&ibev);
}
......
......@@ -42,6 +42,10 @@ enum {
DEF_CACHE_SIZE = 10,
};
+ enum {
+ MLX5_UMR_ALIGN = 2048
+ };
static __be64 *mr_align(__be64 *ptr, int align)
{
unsigned long mask = align - 1;
......@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
- struct device *ddev = dev->ib_dev.dma_device;
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
int npages = 1 << ent->order;
- int size = sizeof(u64) * npages;
int err = 0;
int i;
......@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
}
mr->order = ent->order;
mr->umred = 1;
- mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
- if (!mr->pas) {
- kfree(mr);
- err = -ENOMEM;
- goto out;
- }
- mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, mr->dma)) {
- kfree(mr->pas);
- kfree(mr);
- err = -ENOMEM;
- goto out;
- }
in->seg.status = 1 << 6;
in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
......@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
sizeof(*in));
if (err) {
mlx5_ib_warn(dev, "create mkey failed %d\n", err);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
- kfree(mr->pas);
kfree(mr);
goto out;
}
......@@ -129,11 +114,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
- struct device *ddev = dev->ib_dev.dma_device;
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_ib_mr *mr;
- int size;
int err;
int i;
......@@ -149,15 +132,11 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->size--;
spin_unlock(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
- } else {
- size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
- kfree(mr->pas);
+ else
kfree(mr);
- }
}
}
static ssize_t size_write(struct file *filp, const char __user *buf,
......@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
- struct device *ddev = dev->ib_dev.dma_device;
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent = &cache->ent[c];
struct mlx5_ib_mr *mr;
- int size;
int err;
+ cancel_delayed_work(&ent->dwork);
while (1) {
spin_lock(&ent->lock);
if (list_empty(&ent->head)) {
......@@ -427,15 +405,11 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->size--;
spin_unlock(&ent->lock);
err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
- } else {
- size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
- dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
- kfree(mr->pas);
+ else
kfree(mr);
- }
}
}
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
......@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
int i;
dev->cache.stopped = 1;
- destroy_workqueue(dev->cache.wq);
+ flush_workqueue(dev->cache.wq);
mlx5_mr_cache_debugfs_cleanup(dev);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
clean_keys(dev, i);
+ destroy_workqueue(dev->cache.wq);
return 0;
}
......@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
int page_shift, int order, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct device *ddev = dev->ib_dev.dma_device;
struct umr_common *umrc = &dev->umrc;
struct ib_send_wr wr, *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
+ int size = sizeof(u64) * npages;
int err;
int i;
......@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+ mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+ if (!mr->pas) {
+ err = -ENOMEM;
+ goto error;
+ }
+ mlx5_ib_populate_pas(dev, umem, page_shift,
+ mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+ mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, mr->dma)) {
+ kfree(mr->pas);
+ err = -ENOMEM;
+ goto error;
+ }
memset(&wr, 0, sizeof(wr));
wr.wr_id = (u64)(unsigned long)mr;
......@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
wait_for_completion(&mr->done);
up(&umrc->sem);
+ dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+ kfree(mr->pas);
if (mr->status != IB_WC_SUCCESS) {
mlx5_ib_warn(dev, "reg umr failed\n");
err = -EFAULT;
......
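Note on the reg umr gather buffer fix above: the driver now over-allocates the page-address array by MLX5_UMR_ALIGN - 1 bytes, rounds the working pointer up to the next 2048-byte boundary for DMA, and keeps the original pointer for kfree(). A minimal userspace sketch of that alignment idiom; align_ptr() and the sizes are illustrative, not driver code:

#include <stdint.h>
#include <stdlib.h>

/* Round ptr up to the next 'align' boundary; align must be a
 * power of two. Mirrors the driver's mr_align() idiom. */
static void *align_ptr(void *ptr, uintptr_t align)
{
	uintptr_t mask = align - 1;

	return (void *)(((uintptr_t)ptr + mask) & ~mask);
}

int main(void)
{
	size_t size = 4096;                   /* payload bytes */
	uintptr_t align = 2048;               /* stands in for MLX5_UMR_ALIGN */
	void *raw = malloc(size + align - 1); /* slack so an aligned region fits */
	void *pas;

	if (!raw)
		return 1;
	pas = align_ptr(raw, align);
	/* map and use 'pas' ... then release the original pointer: */
	free(raw);
	return 0;
}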
......@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
switch (qp_type) {
case IB_QPT_XRC_INI:
- size = sizeof(struct mlx5_wqe_xrc_seg);
+ size += sizeof(struct mlx5_wqe_xrc_seg);
/* fall through */
case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
......@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
sizeof(struct mlx5_wqe_raddr_seg);
break;
+ case IB_QPT_XRC_TGT:
+ return 0;
case IB_QPT_UC:
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg);
break;
case IB_QPT_UD:
case IB_QPT_SMI:
case IB_QPT_GSI:
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_datagram_seg);
break;
case MLX5_IB_QPT_REG_UMR:
- size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
sizeof(struct mlx5_mkey_seg);
break;
......@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
return wqe_size;
if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
- mlx5_ib_dbg(dev, "\n");
+ mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+ wqe_size, dev->mdev.caps.max_sq_desc_sz);
return -EINVAL;
}
......@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+ if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+ mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+ qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+ return -ENOMEM;
+ }
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.max_gs = attr->cap.max_send_sge;
- qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+ qp->sq.max_post = wq_size / wqe_size;
+ attr->cap.max_send_wr = qp->sq.max_post;
return wq_size;
}
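The calc_sq_size() change above is easiest to see with numbers: the work queue is rounded up to a power of two and carved into MLX5_SEND_WQE_BB-sized basic blocks, but the number of postable WQEs is the plain quotient wq_size / wqe_size; the old 1 << ilog2(...) rounded that quotient down to a power of two and understated capacity. A standalone sketch with made-up sizes:

#include <stdio.h>

/* Round v up to the next power of two (v > 0). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	/* Illustrative numbers, not taken from the driver. */
	unsigned int wqe_size = 192;     /* bytes per WQE, already padded */
	unsigned int max_send_wr = 100;  /* requested by the user */
	unsigned int bb = 64;            /* stands in for MLX5_SEND_WQE_BB */

	unsigned int wq_size = roundup_pow2(max_send_wr * wqe_size); /* 32768 */
	unsigned int wqe_cnt = wq_size / bb;                         /* 512 */
	/* Fixed code: full quotient (170); old code rounded this
	 * down to a power of two (128), wasting queue capacity. */
	unsigned int max_post = wq_size / wqe_size;

	printf("wq_size=%u wqe_cnt=%u max_post=%u\n", wq_size, wqe_cnt, max_post);
	return 0;
}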
......@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX,
},
},
[MLX5_QP_STATE_RTR] = {
......@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
},
},
};
......@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
rseg->reserved = 0;
}
- static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
- {
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
- } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
- } else {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare = 0;
- }
- }
- static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
- struct ib_send_wr *wr)
- {
- aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
- aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
- aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
- aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
- }
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
......@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- set_raddr_seg(seg, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- set_atomic_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_atomic_seg);
- size += (sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_atomic_seg)) / 16;
- break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- set_raddr_seg(seg, wr->wr.atomic.remote_addr,
- wr->wr.atomic.rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- set_masked_atomic_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
- size += (sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
- break;
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -ENOSYS;
+ *bad_wr = wr;
+ goto out;
case IB_WR_LOCAL_INV:
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
......
......@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
mlx5_vfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
- goto err_srq;
+ goto err_usr_kern_srq;
}
mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
......@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
err_core:
mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+ err_usr_kern_srq:
if (pd->uobject)
destroy_srq_user(pd, srq);
else
......
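The srq.c fix above is the classic goto-unwind pattern: when the firmware command fails, the error path must jump to a label that still frees the user/kernel SRQ buffers allocated earlier, which the old err_srq target skipped, leaking them. A compressed sketch of the pattern; names and error values are hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Stripped-down create path: a failure after buf is allocated
 * must unwind through a label that frees buf, not jump past it. */
static int create_obj(int fail_cmd)
{
	void *buf;
	int err;

	buf = malloc(64);        /* like create_srq_user()/create_srq_kernel() */
	if (!buf)
		return -12;      /* -ENOMEM */

	err = fail_cmd ? -5 : 0; /* like the mlx5_core create command */
	if (err)
		goto err_buf;    /* the bug was jumping to a later label */

	free(buf);               /* a real SRQ keeps buf on success; freed
	                          * here only to keep the sketch leak-free */
	return 0;

err_buf:
	free(buf);
	return err;
}

int main(void)
{
	printf("ok=%d fail=%d\n", create_obj(0), create_obj(1));
	return 0;
}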
......@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
eqe->type, eqe->subtype, eq->eqn);
break;
- };
+ }
set_eqe_hw(eqe);
++eq->cons_index;
......
......@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
return IB_QPS_SQE;
case OCRDMA_QPS_ERR:
return IB_QPS_ERR;
- };
+ }
return IB_QPS_ERR;
}
......@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
return OCRDMA_QPS_SQE;
case IB_QPS_ERR:
return OCRDMA_QPS_ERR;
- };
+ }
return OCRDMA_QPS_ERR;
}
......@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
break;
default:
return -EINVAL;
- };
+ }
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
if (!cmd)
......
......@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
- };
+ }
}
static struct ocrdma_driver ocrdma_drv = {
......
......@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
/* Unsupported */
*ib_speed = IB_SPEED_SDR;
*ib_width = IB_WIDTH_1X;
- };
+ }
}
......@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
default:
ibwc_status = IB_WC_GENERAL_ERR;
break;
- };
+ }
return ibwc_status;
}
......@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
pr_err("%s() invalid opcode received = 0x%x\n",
__func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
break;
- };
+ }
}
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
......
......@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
return 0;
}
- static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
+ static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+ int csum)
{
block->token = token;
- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+ if (csum) {
+ block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+ sizeof(block->data) - 2);
+ block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+ }
}
- static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
+ static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
struct mlx5_cmd_mailbox *next = msg->next;
while (next) {
- calc_block_sig(next->buf, token);
+ calc_block_sig(next->buf, token, csum);
next = next->next;
}
}
- static void set_signature(struct mlx5_cmd_work_ent *ent)
+ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
- calc_chain_sig(ent->in, ent->token);
- calc_chain_sig(ent->out, ent->token);
+ calc_chain_sig(ent->in, ent->token, csum);
+ calc_chain_sig(ent->out, ent->token, csum);
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
......@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
lay->type = MLX5_PCI_CMD_XPORT;
lay->token = ent->token;
lay->status_own = CMD_OWNER_HW;
- if (!cmd->checksum_disabled)
- set_signature(ent);
+ set_signature(ent, !cmd->checksum_disabled);
dump_command(dev, ent, 1);
ktime_get_ts(&ent->ts1);
......@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
block = next->buf;
- if (xor8_buf(block, sizeof(*block)) != 0xff)
- return -EINVAL;
memcpy(to, block->data, copy);
to += copy;
......@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_map;
}
+ cmd->checksum_disabled = 1;
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
......@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
- case MLX5_CMD_STAT_LIM_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
......
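For context on the signatures being made optional above: xor8_buf() XORs every byte of a buffer, and storing the complement of that XOR in the trailing sig byte makes the XOR of the whole block verify to 0xff, which is what verify_block_sig() checks. A self-contained sketch of the scheme, simplified to a single sig byte:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* XOR all bytes in a buffer, as mlx5's xor8_buf() does. */
static uint8_t xor8_buf(const void *buf, int len)
{
	const uint8_t *p = buf;
	uint8_t sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= p[i];
	return sum;
}

int main(void)
{
	uint8_t block[16];

	memset(block, 0xab, sizeof(block));
	/* Store the complement of the XOR of the other bytes in the
	 * last byte; the XOR of the whole block then comes out 0xff. */
	block[15] = ~xor8_buf(block, sizeof(block) - 1);

	printf("verify: 0x%02x\n", xor8_buf(block, sizeof(block))); /* 0xff */
	return 0;
}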
......@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
+ snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- name, eq);
+ eq->name, eq);
if (err)
goto err_eq;
......
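The renaming above splits name construction in two: the IB driver picks a base such as "mlx5_comp0", and mlx5_create_map_eq() appends "@pci:<bus address>" before request_irq(), so /proc/interrupts shows which device owns each vector; MLX5_MAX_EQ_NAME grows from 20 to 32 so the suffix fits. A userspace sketch of the composition; the PCI address is an example:

#include <stdio.h>

#define MLX5_MAX_EQ_NAME 32	/* raised from 20 in this series */

int main(void)
{
	char name[MLX5_MAX_EQ_NAME];
	char eq_name[MLX5_MAX_EQ_NAME];
	int i = 0;

	/* IB driver side: base name per completion vector. */
	snprintf(name, sizeof(name), "mlx5_comp%d", i);
	/* Core side: append the PCI address for /proc/interrupts. */
	snprintf(eq_name, sizeof(eq_name), "%s@pci:%s", name, "0000:04:00.0");
	puts(eq_name);	/* mlx5_comp0@pci:0000:04:00.0, 27 chars, fits in 32 */
	return 0;
}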
......@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
struct mlx5_cmd_set_hca_cap_mbox_out set_out;
- struct mlx5_profile *prof = dev->profile;
u64 flags;
- int csum = 1;
int err;
memset(&query_ctx, 0, sizeof(query_ctx));
......@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
sizeof(set_ctx->hca_cap));
- if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
- csum = !!prof->cmdif_csum;
- flags = be64_to_cpu(set_ctx->hca_cap.flags);
- if (csum)
- flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
- else
- flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
- set_ctx->hca_cap.flags = cpu_to_be64(flags);
- }
if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
+ flags = be64_to_cpu(query_out->hca_cap.flags);
+ /* disable checksum */
+ flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+ set_ctx->hca_cap.flags = cpu_to_be64(flags);
memset(&set_out, 0, sizeof(set_out));
set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
......@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
if (err)
goto query_ex;
- if (!csum)
- dev->cmd.checksum_disabled = 1;
query_ex:
kfree(query_out);
kfree(set_ctx);
......
......@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
__be64 pas[0];
};
+ enum {
+ MAX_RECLAIM_TIME_MSECS = 5000,
+ };
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
struct rb_root *root = &dev->priv.page_root;
......@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int err;
int i;
+ if (nclaimed)
+ *nclaimed = 0;
memset(&in, 0, sizeof(in));
outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
out = mlx5_vzalloc(outlen);
......@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
- unsigned long end = jiffies + msecs_to_jiffies(5000);
+ unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
struct fw_page *fwp;
struct rb_node *p;
+ int nclaimed = 0;
int err;
do {
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
return err;
}
+ if (nclaimed)
+ end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
}
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
......
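The pagealloc.c change turns a fixed 5-second deadline into a sliding one: reclaim_pages() now reports how many pages the firmware actually returned, and any progress re-arms the timeout, so the loop only gives up after MAX_RECLAIM_TIME_MSECS with no pages returned at all. A userspace sketch of that loop shape; reclaim_some() is a stand-in, not the driver function:

#include <stdio.h>
#include <time.h>

#define MAX_RECLAIM_TIME_MSECS 5000

/* Stand-in for reclaim_pages(): returns 0 on success and reports
 * progress through *nclaimed, as the fixed driver code does. */
static int reclaim_some(int *pages_left, int *nclaimed)
{
	*nclaimed = *pages_left > 4 ? 4 : *pages_left;
	*pages_left -= *nclaimed;
	return 0;
}

int main(void)
{
	time_t end = time(NULL) + MAX_RECLAIM_TIME_MSECS / 1000;
	int pages_left = 10;	/* pages the "firmware" still holds */
	int nclaimed;

	while (pages_left > 0) {
		if (reclaim_some(&pages_left, &nclaimed))
			return 1;
		if (nclaimed)	/* any progress re-arms the deadline */
			end = time(NULL) + MAX_RECLAIM_TIME_MSECS / 1000;
		if (time(NULL) > end) {
			fprintf(stderr, "FW did not return all pages\n");
			break;
		}
	}
	printf("pages left: %d\n", pages_left);
	return 0;
}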
......@@ -181,7 +181,7 @@ enum {
MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
- MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46,
+ MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
};
enum {
......@@ -417,7 +417,7 @@ struct mlx5_init_seg {
struct health_buffer health;
__be32 rsvd2[884];
__be32 health_counter;
- __be32 rsvd3[1023];
+ __be32 rsvd3[1019];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
......
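The rsvd3 shrink above is pure offset arithmetic: the trailing ieee1588_clk (8 bytes), ieee1588_clk_type and clr_intx (4 bytes each) occupy sixteen bytes, i.e. four 32-bit slots, so rsvd3 gives up exactly 1023 - 1019 = 4 entries to keep the following fields at their hardware-defined offsets. A compile-time check of that arithmetic, using plain stdint types in place of the kernel's __be32/__be64:

#include <stdint.h>

int main(void)
{
	/* One 64-bit field plus two 32-bit fields consume the four
	 * 32-bit slots given up by rsvd3. */
	_Static_assert(sizeof(uint64_t) + 2 * sizeof(uint32_t) ==
		       (1023 - 1019) * sizeof(uint32_t),
		       "rsvd3 shrink matches the appended fields");
	return 0;
}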
......@@ -82,7 +82,7 @@ enum {
};
enum {
- MLX5_MAX_EQ_NAME = 20
+ MLX5_MAX_EQ_NAME = 32
};
enum {
......@@ -747,8 +747,7 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
enum {
MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};
enum {
......@@ -758,7 +757,6 @@ enum {
struct mlx5_profile {
u64 mask;
u32 log_max_qp;
- int cmdif_csum;
struct {
int size;
int limit;
......