Commit f31c32ef authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A few minor fixes:

   - Fix buffer management in iser to correct a regression with the login
     authentication feature from v5.17

   - Don't iterate over non-present ports in mlx5

   - Fix an error introduced by the fortify work in cxgb4

   - Two bug fixes for the recently merged ERDMA driver

   - Unbreak RDMA dmabuf support, a regression from v5.19"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA: Handle the return code from dma_resv_wait_timeout() properly
  RDMA/erdma: Correct the max_qp and max_cq capacities of the device
  RDMA/erdma: Using the key in FMR WR instead of MR structure
  RDMA/cxgb4: fix accept failure due to increased cpl_t5_pass_accept_rpl size
  RDMA/mlx5: Use the proper number of ports
  IB/iser: Fix login with authentication
parents b9bce6e5 b16de8b9
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
         struct scatterlist *sg;
         unsigned long start, end, cur = 0;
         unsigned int nmap = 0;
+        long ret;
         int i;

         dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
          * may be not up-to-date. Wait for the exporter to finish
          * the migration.
          */
-        return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+        ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
                                      DMA_RESV_USAGE_KERNEL,
                                      false, MAX_SCHEDULE_TIMEOUT);
+        if (ret < 0)
+                return ret;
+        if (ret == 0)
+                return -ETIMEDOUT;
+        return 0;
 }
 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
......
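Editor's note on the hunk above: dma_resv_wait_timeout() does not follow the usual "0 or negative errno" convention. It returns the remaining timeout (greater than zero) when the fences signalled, zero when the wait timed out, or a negative errno, while callers of ib_umem_dmabuf_map_pages() expect plain 0 on success. A minimal sketch of the same translation, assuming a hypothetical helper name and a bare dma_resv pointer rather than the umem structure:

        #include <linux/dma-resv.h>

        /* Sketch only (not from the patch): wait for the exporter's fences and
         * fold the three dma_resv_wait_timeout() outcomes into 0/-errno. */
        static int wait_for_exporter(struct dma_resv *resv)
        {
                long ret;

                ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
                                            false, MAX_SCHEDULE_TIMEOUT);
                if (ret < 0)            /* error from the fence wait: propagate */
                        return ret;
                if (ret == 0)           /* timed out with fences still pending */
                        return -ETIMEDOUT;
                return 0;               /* ret > 0 is the remaining timeout: success */
        }

Returning the raw positive value, as the old code effectively did, looked like an error to callers that treat any non-zero result as failure, which is what broke the dmabuf path.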
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                         opt2 |= CCTRL_ECN_V(1);
         }

-        skb_get(skb);
-        rpl = cplhdr(skb);
         if (!is_t4(adapter_type)) {
-                BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
-                skb_trim(skb, sizeof(*rpl5));
-                rpl5 = (void *)rpl;
-                INIT_TP_WR(rpl5, ep->hwtid);
-        } else {
-                skb_trim(skb, sizeof(*rpl));
-                INIT_TP_WR(rpl, ep->hwtid);
-        }
-        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-                                                    ep->hwtid));
-
-        if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
                 u32 isn = (prandom_u32() & ~7UL) - 1;

+                skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
+                rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
+                rpl = (void *)rpl5;
+                INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
                 opt2 |= T5_OPT_2_VALID_F;
                 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
                 opt2 |= T5_ISS_F;
-                rpl5 = (void *)rpl;
-                memset_after(rpl5, 0, iss);
                 if (peer2peer)
                         isn += 4;
                 rpl5->iss = cpu_to_be32(isn);
                 pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
+        } else {
+                skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+                rpl = __skb_put_zero(skb, sizeof(*rpl));
+                INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
         }

         rpl->opt0 = cpu_to_be64(opt0);
......
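For readers unfamiliar with the two skb styles in this hunk: skb_trim() only shortens skb->len and leaves the existing bytes in place, whereas __skb_put_zero() reserves bytes at the tail and clears exactly what will be sent. The restored path therefore rebuilds the accept reply in a freshly zeroed buffer of the rounded CPL size instead of trimming the reused request skb. A rough standalone sketch of that contract, with an illustrative helper name and reply_len standing in for sizeof(*rpl5):

        #include <linux/kernel.h>
        #include <linux/skbuff.h>

        /* Sketch only (not the driver code): reserve a zeroed, 16-byte-rounded
         * reply at the tail of an skb, the way the restored accept_cr() path
         * sizes its CPL_PASS_ACCEPT_RPL message. */
        static void *reserve_zeroed_reply(struct sk_buff *skb, unsigned int reply_len)
        {
                unsigned int wire_len = roundup(reply_len, 16);

                if (skb_tailroom(skb) < wire_len)
                        return NULL;    /* caller must supply a larger skb */

                /* extends skb->len by wire_len and memsets the new area to 0 */
                return __skb_put_zero(skb, wire_len);
        }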
@@ -407,7 +407,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
                              to_erdma_access_flags(reg_wr(send_wr)->access);
                 regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
                 regmr_sge->length = cpu_to_le32(mr->ibmr.length);
-                regmr_sge->stag = cpu_to_le32(mr->ibmr.lkey);
+                regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
                 attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
                         FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
                         FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
......
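Context for the key fix: with IB_WR_REG_MR the key the consumer wants installed travels in the work request itself (reg_wr(send_wr)->key), and ULPs typically derive it from the rkey and rotate its low byte right before posting, so the lkey cached in the MR structure is not necessarily the value the hardware must program. A rough consumer-side sketch of that pattern; the helper name, signalling and access flags are illustrative, not taken from any driver:

        #include <rdma/ib_verbs.h>

        /* Illustrative only: post a fast-registration WR for 'mr' on 'qp'.
         * The key to install goes into wr.key; the driver must build its SQE
         * from that field, not from the MR's cached lkey. */
        static int post_fast_reg(struct ib_qp *qp, struct ib_mr *mr)
        {
                struct ib_reg_wr wr = {};

                /* rotate the low byte so stale accesses with the old key fail */
                ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

                wr.wr.opcode = IB_WR_REG_MR;
                wr.wr.send_flags = IB_SEND_SIGNALED;
                wr.mr = mr;
                wr.key = mr->rkey;      /* the key the device must install */
                wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
                            IB_ACCESS_REMOTE_WRITE;

                return ib_post_send(qp, &wr.wr, NULL);
        }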
@@ -280,7 +280,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
         attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
         attr->vendor_part_id = dev->pdev->device;
         attr->hw_ver = dev->pdev->revision;
-        attr->max_qp = dev->attrs.max_qp;
+        attr->max_qp = dev->attrs.max_qp - 1;
         attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
         attr->max_qp_rd_atom = dev->attrs.max_ord;
         attr->max_qp_init_rd_atom = dev->attrs.max_ird;
@@ -291,7 +291,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
         attr->max_send_sge = dev->attrs.max_send_sge;
         attr->max_recv_sge = dev->attrs.max_recv_sge;
         attr->max_sge_rd = dev->attrs.max_sge_rd;
-        attr->max_cq = dev->attrs.max_cq;
+        attr->max_cq = dev->attrs.max_cq - 1;
         attr->max_cqe = dev->attrs.max_cqe;
         attr->max_mr = dev->attrs.max_mr;
         attr->max_pd = dev->attrs.max_pd;
......
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
         int err;
         int port;

-        for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
-                dev->port_caps[port - 1].has_smi = false;
-                if (MLX5_CAP_GEN(dev->mdev, port_type) ==
-                    MLX5_CAP_PORT_TYPE_IB) {
-                        if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
-                                err = mlx5_query_hca_vport_context(dev->mdev, 0,
-                                                                   port, 0,
-                                                                   &vport_ctx);
-                                if (err) {
-                                        mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
-                                                    port, err);
-                                        return err;
-                                }
-                                dev->port_caps[port - 1].has_smi =
-                                        vport_ctx.has_smi;
-                        } else {
-                                dev->port_caps[port - 1].has_smi = true;
-                        }
+        if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+                return 0;
+
+        for (port = 1; port <= dev->num_ports; port++) {
+                if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+                        dev->port_caps[port - 1].has_smi = true;
+                        continue;
                 }
+
+                err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+                                                   &vport_ctx);
+                if (err) {
+                        mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+                                    port, err);
+                        return err;
+                }
+                dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
         }

         return 0;
 }
......
@@ -537,6 +537,7 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
         struct iscsi_hdr *hdr;
         char *data;
         int length;
+        bool full_feature_phase;

         if (unlikely(wc->status != IB_WC_SUCCESS)) {
                 iser_err_comp(wc, "login_rsp");
@@ -550,6 +551,9 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
         hdr = desc->rsp + sizeof(struct iser_ctrl);
         data = desc->rsp + ISER_HEADERS_LEN;
         length = wc->byte_len - ISER_HEADERS_LEN;
+        full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
+                              ISCSI_FULL_FEATURE_PHASE) &&
+                             (hdr->flags & ISCSI_FLAG_CMD_FINAL);

         iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                  hdr->itt, length);
@@ -560,7 +564,8 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
                                    desc->rsp_dma, ISER_RX_LOGIN_SIZE,
                                    DMA_FROM_DEVICE);

-        if (iser_conn->iscsi_conn->session->discovery_sess)
+        if (!full_feature_phase ||
+            iser_conn->iscsi_conn->session->discovery_sess)
                 return;

         /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
......
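Background for the check added above: a login response only ends the login phase when it both sets the transit bit (ISCSI_FLAG_CMD_FINAL) and names full feature phase as the next stage (the low flag bits equal ISCSI_FULL_FEATURE_PHASE). When authentication such as CHAP is in use, the login phase spans several request/response exchanges, so posting the receive buffer reserved for full feature phase on an intermediate response broke authenticated logins; the shortlog entry "IB/iser: Fix login with authentication" is this fix. A small standalone restatement of the test, with an illustrative helper name and constants from iscsi_proto.h:

        #include <scsi/iscsi_proto.h>

        /* Illustrative helper: true only for the login response that moves the
         * connection into full feature phase (transit bit set and next stage
         * equal to full feature phase). */
        static bool login_enters_full_feature_phase(const struct iscsi_hdr *hdr)
        {
                return ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
                        ISCSI_FULL_FEATURE_PHASE) &&
                       (hdr->flags & ISCSI_FLAG_CMD_FINAL);
        }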
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
         __be32 opt2;
         __be64 opt0;
         __be32 iss;
-        __be32 rsvd[3];
+        __be32 rsvd;
 };

 struct cpl_act_open_req {
......