Commit aa0c9086 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Small update, a few more merge window bugs and normal driver bug
  fixes:

   - Two merge window regressions in mlx5: an error path bug found by
     syzkaller and some lost code during a rework preventing ipoib from
     working in some configurations

   - Silence clang compilation warning in OPA related code

   - Fix a long standing race condition in ib_nl for ACM

   - Fix hfi1 workqueue handling when the device is shut down"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mlx5: Set PD pointers for the error flow unwind
  IB/mlx5: Fix 50G per lane indication
  RDMA/siw: Fix reporting vendor_part_id
  IB/sa: Resolv use-after-free in ib_nl_make_request()
  IB/hfi1: Do not destroy link_wq when the device is shut down
  IB/hfi1: Do not destroy hfi1_wq when the device is shut down
  RDMA/mlx5: Fix legacy IPoIB QP initialization
  IB/hfi1: Add explicit cast OPA_MTU_8192 to 'enum ib_mtu'
parents 0f318cba 0a037150
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
 	return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
 	void *data;
 	struct ib_sa_mad *mad;
 	int len;
+	unsigned long flags;
+	unsigned long delay;
+	gfp_t gfp_flag;
+	int ret;
+
+	INIT_LIST_HEAD(&query->list);
+	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
 	mad = query->mad_buf->mad;
 	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);

@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
-
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-	unsigned long flags;
-	unsigned long delay;
-	int ret;
-
-	INIT_LIST_HEAD(&query->list);
-	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
-
-	/* Put the request on the list first.*/
+	gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+		GFP_NOWAIT;
+
 	spin_lock_irqsave(&ib_nl_request_lock, flags);
+	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
+
+	if (ret)
+		goto out;
+
+	/* Put the request on the list.*/
 	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
 	query->timeout = delay + jiffies;
 	list_add_tail(&query->list, &ib_nl_request_list);
 	/* Start the timeout if this is the only request */
 	if (ib_nl_request_list.next == &query->list)
 		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
-	ret = ib_nl_send_msg(query, gfp_mask);
-	if (ret) {
-		ret = -EIO;
-		/* Remove the request */
-		spin_lock_irqsave(&ib_nl_request_lock, flags);
-		list_del(&query->list);
-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-	}
+out:
+	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
 	return ret;
 }
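
The use-after-free closed by the ib_nl change above is an ordering problem: the old code put the query on ib_nl_request_list, dropped ib_nl_request_lock, and only then sent the netlink message, so a fast response or the timeout work could find the entry, complete it and free it while the sender was still unwinding a failed send with list_del(). The rework does the send and the list insertion inside one critical section and only publishes the query once the send has succeeded; because the send now runs under a spinlock it must not sleep, which is why the allocation mask is first clamped to GFP_ATOMIC or GFP_NOWAIT. Below is a minimal userspace model of that locking pattern; the request/response threads, the names and the printf() standing in for the netlink send are illustrative only, not taken from the kernel sources.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	struct request *next;
	int seq;
};

static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *request_list;	/* pending requests, newest first */

/* Requester: send and publish under one critical section, and publish only
 * on success, so no other thread can free a request we still touch. */
static int make_request(int seq)
{
	struct request *req = malloc(sizeof(*req));
	int ret;

	if (!req)
		return -1;
	req->seq = seq;

	pthread_mutex_lock(&request_lock);
	ret = printf("send seq %d\n", seq) < 0 ? -1 : 0;	/* the "send" */
	if (ret) {
		free(req);		/* never published, nothing can race */
	} else {
		req->next = request_list;
		request_list = req;	/* visible to handle_response() now */
	}
	pthread_mutex_unlock(&request_lock);
	return ret;
}

/* Responder: must take request_lock before it may unlink and free, so it
 * cannot run in the middle of make_request()'s send-or-unwind sequence. */
static void handle_response(int seq)
{
	struct request **p;

	pthread_mutex_lock(&request_lock);
	for (p = &request_list; *p; p = &(*p)->next) {
		if ((*p)->seq == seq) {
			struct request *req = *p;

			*p = req->next;
			free(req);
			break;
		}
	}
	pthread_mutex_unlock(&request_lock);
}

Note that the kernel version also stops overriding a send failure with -EIO and simply propagates whatever rdma_nl_multicast() returned.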
@@ -830,6 +830,29 @@ static int create_workqueues(struct hfi1_devdata *dd)
 	return -ENOMEM;
 }
 
+/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+	int pidx;
+	struct hfi1_pportdata *ppd;
+
+	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+		ppd = dd->pport + pidx;
+
+		if (ppd->hfi1_wq) {
+			destroy_workqueue(ppd->hfi1_wq);
+			ppd->hfi1_wq = NULL;
+		}
+		if (ppd->link_wq) {
+			destroy_workqueue(ppd->link_wq);
+			ppd->link_wq = NULL;
+		}
+	}
+}
+
 /**
  * enable_general_intr() - Enable the IRQs that will be handled by the
  * general interrupt handler.

@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
 		 * We can't count on interrupts since we are stopping.
 		 */
 		hfi1_quiet_serdes(ppd);
-
-		if (ppd->hfi1_wq) {
-			destroy_workqueue(ppd->hfi1_wq);
-			ppd->hfi1_wq = NULL;
-		}
-		if (ppd->link_wq) {
-			destroy_workqueue(ppd->link_wq);
-			ppd->link_wq = NULL;
-		}
+		if (ppd->hfi1_wq)
+			flush_workqueue(ppd->hfi1_wq);
+		if (ppd->link_wq)
+			flush_workqueue(ppd->link_wq);
 	}
 	sdma_exit(dd);
 }

@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
 	 * clear dma engines, etc.
 	 */
 	shutdown_device(dd);
+	destroy_workqueues(dd);
 
 	stop_timers(dd);
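
The two hfi1 patches split workqueue teardown in two: shutdown_device() now only flushes hfi1_wq and link_wq, since the send paths can still try to queue work while the device is going down, and the queues are destroyed exactly once by the new destroy_workqueues(), called from remove_one() after shutdown_device(); the _hfi1_schedule_send()/_hfi1_schedule_tid_send() hunks below additionally bail out early once HFI1_SHUTDOWN is set. A rough sketch of that lifecycle rule, using a made-up demo_port structure rather than the real driver types, might look like this:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_port {
	struct workqueue_struct *wq;
};

static int demo_port_init(struct demo_port *p, int idx)
{
	p->wq = alloc_workqueue("demo_wq%d", WQ_HIGHPRI, 0, idx);
	return p->wq ? 0 : -ENOMEM;
}

/* Device shutdown: later work submissions are still possible, so only wait
 * for what is already queued; keep the workqueue itself alive. */
static void demo_port_shutdown(struct demo_port *p)
{
	if (p->wq)
		flush_workqueue(p->wq);
}

/* Driver removal: nothing can submit work any more, so destroy the queue
 * exactly once and clear the pointer so a repeated call is a no-op. */
static void demo_port_destroy(struct demo_port *p)
{
	if (p->wq) {
		destroy_workqueue(p->wq);
		p->wq = NULL;
	}
}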
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
 	/* Constraining 10KB packets to 8KB packets */
 	if (mtu == (enum ib_mtu)OPA_MTU_10240)
-		mtu = OPA_MTU_8192;
+		mtu = (enum ib_mtu)OPA_MTU_8192;
 	return opa_mtu_enum_to_int((enum opa_mtu)mtu);
 }
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
 	struct hfi1_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct hfi1_devdata *dd = ppd->dd;
+
+	if (dd->flags & HFI1_SHUTDOWN)
+		return true;
 
 	return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
 			       priv->s_sde ?
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
 	struct hfi1_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+	struct hfi1_devdata *dd = ppd->dd;
+
+	if ((dd->flags & HFI1_SHUTDOWN))
+		return true;
 
 	return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
 				   priv->s_sde ?
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 			    mdev_port_num);
 	if (err)
 		goto out;
-	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
 	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
 
 	props->active_width = IB_WIDTH_4X;
@@ -2668,6 +2668,10 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
 		return (create_flags) ? -EINVAL : 0;
 
+	process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+			    mlx5_get_flow_namespace(dev->mdev,
+						    MLX5_FLOW_NAMESPACE_BYPASS),
+			    qp);
 	process_create_flag(dev, &create_flags,
 			    IB_QP_CREATE_INTEGRITY_EN,
 			    MLX5_CAP_GEN(mdev, sho), qp);

@@ -3001,11 +3005,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 		mlx5_ib_destroy_dct(qp);
 	} else {
 		/*
-		 * The two lines below are temp solution till QP allocation
+		 * These lines below are temp solution till QP allocation
 		 * will be moved to be under IB/core responsiblity.
 		 */
 		qp->ibqp.send_cq = attr->send_cq;
 		qp->ibqp.recv_cq = attr->recv_cq;
+		qp->ibqp.pd = pd;
 		destroy_qp_common(dev, qp, udata);
 	}
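
Two separate mlx5 problems are addressed above. The first hunk restores the IB_QP_CREATE_NETIF_QP handling that was lost in the QP-creation rework, which is what broke legacy IPoIB QP initialization. The second is the syzkaller-found error-path bug: destroy_qp_common() serves both the normal destroy path and the create-error unwind, and the unwind must fill in qp->ibqp.pd (as it already did for the CQ pointers) before calling it, since the shared teardown code dereferences the PD. The sketch below illustrates that general rule with invented demo_* names and a stubbed hardware call; it is not the mlx5 code itself.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_pd {
	u32 uid;
};

struct demo_qp {
	struct demo_pd *pd;
};

static int demo_hw_create(struct demo_qp *qp)
{
	return -EIO;			/* pretend the firmware step failed */
}

/* Shared by the normal destroy path and the create-error unwind: it relies
 * on qp->pd being valid, e.g. to report which PD the QP belonged to. */
static void demo_destroy_qp(struct demo_qp *qp)
{
	pr_debug("destroying qp owned by pd uid %u\n", qp->pd->uid);
	kfree(qp);
}

static struct demo_qp *demo_create_qp(struct demo_pd *pd)
{
	struct demo_qp *qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	int err;

	if (!qp)
		return ERR_PTR(-ENOMEM);

	err = demo_hw_create(qp);
	if (err) {
		/* Populate what the shared unwind will dereference. */
		qp->pd = pd;
		demo_destroy_qp(qp);
		return ERR_PTR(err);
	}

	qp->pd = pd;
	return qp;
}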
@@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
 	static int dev_id = 1;
 	int rv;
 
+	sdev->vendor_part_id = dev_id++;
+
 	rv = ib_register_device(base_dev, name);
 	if (rv) {
 		pr_warn("siw: device registration error %d\n", rv);
 		return rv;
 	}
-	sdev->vendor_part_id = dev_id++;
 
 	siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);