Commit b4b927fc authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Several small recent regressions - rather more than usual, but nothing
  too scary. Good to know people are testing.

   - Typo causing incorrect operation of the mlx5 mkey cache
     expiration

   - Revert a CM patch that is breaking some ULPs

   - Typo breaking SRQ in rxe

   - Revert a rxe patch breaking icrc calculation

   - Static checker warning about unbalanced locking in hns

   - Subtle cxgb4 regression from a recent atomic to refcount
     conversion"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/iw_cxgb4: Fix refcount underflow while destroying cqs.
  RDMA/hns: Fix the double unlock problem of poll_sem
  RDMA/rxe: Restore setting tot_len in the IPv4 header
  RDMA/rxe: Use the correct size of wqe when processing SRQ
  RDMA/cma: Revert INIT-INIT patch
  RDMA/mlx5: Delay emptying a cache entry when a new MR is added to it recently
parents 484faec8 2638a323
@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 	return ret;
 }
 
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+	struct ib_qp_attr qp_attr;
+	int qp_attr_mask, ret;
+
+	qp_attr.qp_state = IB_QPS_INIT;
+	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+	if (ret)
+		return ret;
+
+	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 		   struct ib_qp_init_attr *qp_init_attr)
 {
 	struct rdma_id_private *id_priv;
 	struct ib_qp *qp;
-	int ret = 0;
+	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
+	else
+		ret = cma_init_conn_qp(id_priv, qp);
 	if (ret)
 		goto out_destroy;
...
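For context, the revert restores cma_init_conn_qp(), so rdma_create_qp() once again moves connected QPs to INIT on behalf of the ULP. A rough sketch of what a ULP would otherwise have to do by hand is below; the function name and attribute values are illustrative only, not taken from this commit (the real mask and values come from rdma_init_qp_attr()).

#include <rdma/ib_verbs.h>

/* Illustrative only: a ULP-side RESET -> INIT transition. */
static int ulp_move_qp_to_init(struct ib_qp *qp, u8 port_num)
{
        struct ib_qp_attr attr = {
                .qp_state        = IB_QPS_INIT,
                .pkey_index      = 0,
                .port_num        = port_num,
                .qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE,
        };

        return ib_modify_qp(qp, &attr,
                            IB_QP_STATE | IB_QP_PKEY_INDEX |
                            IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}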
@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	return !err || err == -ENODATA ? npolled : err;
 }
 
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+	if (refcount_dec_and_test(&chp->refcnt))
+		complete(&chp->cq_rel_comp);
+}
+
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	chp = to_c4iw_cq(ib_cq);
 	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-	refcount_dec(&chp->refcnt);
-	wait_event(chp->wait, !refcount_read(&chp->refcnt));
+	c4iw_cq_rem_ref(chp);
+	wait_for_completion(&chp->cq_rel_comp);
 
 	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
 					     ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	spin_lock_init(&chp->lock);
 	spin_lock_init(&chp->comp_handler_lock);
 	refcount_set(&chp->refcnt, 1);
-	init_waitqueue_head(&chp->wait);
+	init_completion(&chp->cq_rel_comp);
 	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
 	if (ret)
 		goto err_destroy_cq;
...
@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		break;
 	}
 done:
-	if (refcount_dec_and_test(&chp->refcnt))
-		wake_up(&chp->wait);
+	c4iw_cq_rem_ref(chp);
 	c4iw_qp_rem_ref(&qhp->ibqp);
 out:
 	return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-		if (refcount_dec_and_test(&chp->refcnt))
-			wake_up(&chp->wait);
+		c4iw_cq_rem_ref(chp);
 	} else {
 		pr_debug("unknown cqid 0x%x\n", qid);
 		xa_unlock_irqrestore(&dev->cqs, flag);
...
@@ -428,7 +428,7 @@ struct c4iw_cq {
 	spinlock_t lock;
 	spinlock_t comp_handler_lock;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion cq_rel_comp;
 	struct c4iw_wr_wait *wr_waitp;
 };
 
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		   struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
...
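The three cxgb4 hunks above replace the wait-queue teardown with a completion: a refcount_t may only reach zero through refcount_dec_and_test(), so the old "refcount_dec(), then wait for the count to read 0" scheme underflows and warns when the destroyer drops the last reference. A minimal sketch of the pattern being adopted, with made-up type and field names (the driver's equivalents are c4iw_cq, refcnt and cq_rel_comp):

#include <linux/completion.h>
#include <linux/refcount.h>

struct demo_obj {
        refcount_t refcnt;
        struct completion rel_comp;
};

static void demo_obj_put(struct demo_obj *obj)
{
        /* The last put signals the completion instead of waking a wait queue. */
        if (refcount_dec_and_test(&obj->refcnt))
                complete(&obj->rel_comp);
}

static void demo_obj_destroy(struct demo_obj *obj)
{
        demo_obj_put(obj);                      /* drop the creator's reference */
        wait_for_completion(&obj->rel_comp);    /* wait out any in-flight users */
        /* now it is safe to release the object's resources */
}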
@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 	hr_cmd->context =
 		kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
-	if (!hr_cmd->context)
+	if (!hr_cmd->context) {
+		hr_dev->cmd_mod = 0;
 		return -ENOMEM;
+	}
 
 	for (i = 0; i < hr_cmd->max_cmds; ++i) {
 		hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 	spin_lock_init(&hr_cmd->context_lock);
 
 	hr_cmd->use_events = 1;
-	down(&hr_cmd->poll_sem);
 
 	return 0;
 }
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
 	kfree(hr_cmd->context);
 	hr_cmd->use_events = 0;
-	up(&hr_cmd->poll_sem);
 }
 
 struct hns_roce_cmd_mailbox *
...
@@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 	if (hr_dev->cmd_mod) {
 		ret = hns_roce_cmd_use_events(hr_dev);
-		if (ret) {
+		if (ret)
 			dev_warn(dev,
 				 "Cmd event mode failed, set back to poll!\n");
-			hns_roce_cmd_use_polling(hr_dev);
-		}
 	}
 
 	ret = hns_roce_init_hem(hr_dev);
...
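For context on the hns hunks: hns_roce_cmd_use_events() used to take poll_sem only after its allocation succeeded, while the failure path in hns_roce_init() fell back to hns_roce_cmd_use_polling(), which released the semaphore unconditionally. A stripped-down illustration of that imbalance follows; the names are hypothetical and the structure only mirrors the old, buggy flow, not the driver's code:

#include <linux/semaphore.h>
#include <linux/slab.h>

static struct semaphore poll_sem;       /* sema_init(&poll_sem, 1) at init time */
static void *cmd_context;               /* per-command context used in event mode */

static int switch_to_event_mode(void)
{
        cmd_context = kzalloc(64, GFP_KERNEL);
        if (!cmd_context)
                return -ENOMEM;         /* bails out before down() is reached */

        down(&poll_sem);                /* the lock is taken only on success */
        return 0;
}

static void switch_to_poll_mode(void)
{
        kfree(cmd_context);
        up(&poll_sem);                  /* double release when the early -ENOMEM path ran */
}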
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		 */
 		spin_unlock_irq(&ent->lock);
 		need_delay = need_resched() || someone_adding(cache) ||
-			     time_after(jiffies,
+			     !time_after(jiffies,
 					READ_ONCE(cache->last_add) + 300 * HZ);
 		spin_lock_irq(&ent->lock);
 		if (ent->disabled)
 			goto out;
...
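The mlx5 hunk adds the missing negation: time_after(jiffies, last_add + 300 * HZ) is true once more than 300 seconds have passed since the last MR was added, so without the "!" the cache work delayed shrinking only for long-idle entries and emptied recently used ones immediately. Put as a tiny helper (the function name is illustrative only):

#include <linux/jiffies.h>

/* True while we are still within 300 s of the last MR being added to the
 * cache entry -- the case in which shrinking should be postponed. */
static bool mkey_cache_recently_used(unsigned long last_add)
{
        return !time_after(jiffies, last_add + 300 * HZ);
}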
@@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
 	iph->version = IPVERSION;
 	iph->ihl = sizeof(struct iphdr) >> 2;
+	iph->tot_len = htons(skb->len);
 	iph->frag_off = df;
 	iph->protocol = proto;
 	iph->tos = tos;
...
@@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
 		pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
 		return RESPST_ERR_MALFORMED_WQE;
 	}
-	size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+	size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
 	memcpy(&qp->resp.srq_wqe, wqe, size);
 
 	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
...
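The rxe_resp.c change is the classic sizeof(pointer) vs sizeof(*pointer) mix-up: sizeof(wqe) is the size of the pointer itself (8 bytes on 64-bit), so the old code copied only a sliver of the SRQ WQE instead of the whole entry plus its scatter list. A standalone demonstration of the difference, using made-up types rather than rxe's:

#include <stdio.h>

struct demo_sge { unsigned long addr; unsigned int length; unsigned int lkey; };
struct demo_wqe { unsigned int num_sge; struct demo_sge sge[4]; };

int main(void)
{
        struct demo_wqe entry = { .num_sge = 2 };
        struct demo_wqe *wqe = &entry;

        printf("sizeof(wqe)  = %zu\n", sizeof(wqe));    /* pointer size, typically 8 */
        printf("sizeof(*wqe) = %zu\n", sizeof(*wqe));   /* full structure, much larger */
        return 0;
}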