Commit 884fe9da authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:

 - Fix 64K ARM page size support in bnxt_re and efa

 - bnxt_re fixes for a memory leak, incorrect error handling, and the
   removal of a bogus FW failure when running on a VF

 - Update MAINTAINERS for hns and efa

 - Fix two rxe regressions added this merge window: an error unwind bug
   and use of incorrect spinlock primitives

 - hns gets a better algorithm for allocating page tables to avoid
   running out of resources, and a timeout adjustment

 - Fix a test case failure in hns

 - Fix a use after free in irdma and the incorrect construction of a WQE
   causing mis-execution

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/irdma: Fix Local Invalidate fencing
  RDMA/irdma: Prevent QP use after free
  MAINTAINERS: Update maintainer of Amazon EFA driver
  RDMA/bnxt_re: Do not enable congestion control on VFs
  RDMA/bnxt_re: Fix return value of bnxt_re_process_raw_qp_pkt_rx
  RDMA/bnxt_re: Fix a possible memory leak
  RDMA/hns: Modify the value of long message loopback slice
  RDMA/hns: Fix base address table allocation
  RDMA/hns: Fix timeout attr in query qp for HIP08
  RDMA/efa: Fix unsupported page sizes in device
  RDMA/rxe: Convert spin_{lock_bh,unlock_bh} to spin_{lock_irqsave,unlock_irqrestore}
  RDMA/rxe: Fix double unlock in rxe_qp.c
  MAINTAINERS: Update maintainers of HiSilicon RoCE
  RDMA/bnxt_re: Fix the page_size used during the MR creation
parents fd2186d1 5842d1d9
@@ -956,7 +956,8 @@ F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst
 F: drivers/net/ethernet/amazon/
 
 AMAZON RDMA EFA DRIVER
-M: Gal Pressman <galpress@amazon.com>
+M: Michael Margolin <mrgolin@amazon.com>
+R: Gal Pressman <gal.pressman@linux.dev>
 R: Yossi Leybovich <sleybo@amazon.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
@@ -9343,7 +9344,7 @@ F: include/linux/hisi_acc_qm.h
 
 HISILICON ROCE DRIVER
 M: Haoyue Xu <xuhaoyue1@hisilicon.com>
-M: Wenpeng Liang <liangwenpeng@huawei.com>
+M: Junxian Huang <huangjunxian6@hisilicon.com>
 L: linux-rdma@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
......
@@ -3341,9 +3341,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
 	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
 	/* post data received in the send queue */
-	rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
-
-	return 0;
+	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
 }
 
 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
......
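The bnxt_re_process_raw_qp_pkt_rx() hunk above fixes a common pattern where a status code is computed and then silently discarded. A minimal standalone C sketch of that pattern; post_send() is a hypothetical stand-in for bnxt_re_post_send_shadow_qp(), not the driver function:

#include <stdio.h>

/* Hypothetical stand-in for the shadow-QP post; always fails with -22 (-EINVAL). */
static int post_send(void)
{
	return -22;
}

/* Buggy shape: rc is computed, then dropped, so callers always see success. */
static int process_pkt_buggy(void)
{
	int rc = post_send();

	(void)rc;
	return 0;
}

/* Fixed shape: propagate the callee's status directly, as the patch does. */
static int process_pkt_fixed(void)
{
	return post_send();
}

int main(void)
{
	printf("buggy: %d, fixed: %d\n", process_pkt_buggy(), process_pkt_fixed());
	return 0;
}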
@@ -1336,6 +1336,10 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
 {
 	struct bnxt_qplib_cc_param cc_param = {};
 
+	/* Do not enable congestion control on VFs */
+	if (rdev->is_virtfn)
+		return;
+
 	/* Currently enabling only for GenP5 adapters */
 	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
 		return;
......
@@ -2056,6 +2056,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 	u32 pg_sz_lvl;
 	int rc;
 
+	if (!cq->dpi) {
+		dev_err(&rcfw->pdev->dev,
+			"FP: CREATE_CQ failed due to NULL DPI\n");
+		return -EINVAL;
+	}
+
 	hwq_attr.res = res;
 	hwq_attr.depth = cq->max_wqe;
 	hwq_attr.stride = sizeof(struct cq_base);
@@ -2069,11 +2075,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 				 CMDQ_BASE_OPCODE_CREATE_CQ,
 				 sizeof(req));
-	if (!cq->dpi) {
-		dev_err(&rcfw->pdev->dev,
-			"FP: CREATE_CQ failed due to NULL DPI\n");
-		return -EINVAL;
-	}
 	req.dpi = cpu_to_le32(cq->dpi->dpi);
 	req.cq_handle = cpu_to_le64(cq->cq_handle);
 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
......
@@ -215,17 +215,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
 			return -EINVAL;
 		hwq_attr->sginfo->npages = npages;
 	} else {
-		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
-			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
-
+		npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
+						hwq_attr->sginfo->pgsize);
 		hwq->is_user = true;
-		npages = sginfo_num_pages;
-		npages = (npages * PAGE_SIZE) /
-			  BIT_ULL(hwq_attr->sginfo->pgshft);
-		if ((sginfo_num_pages * PAGE_SIZE) %
-		     BIT_ULL(hwq_attr->sginfo->pgshft))
-			if (!npages)
-				npages++;
 	}
 
 	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
......
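For the user-mapped case, the new bnxt_re code above takes npages directly from the number of DMA blocks of the requested page size instead of rescaling a PAGE_SIZE-based count. A minimal userspace sketch of that counting; num_dma_blocks() is a hypothetical helper mirroring what ib_umem_num_dma_blocks() computes from the umem's start address and length, not the kernel API itself:

#include <stdint.h>
#include <stdio.h>

/* Round down/up to a power-of-two block size. */
static uint64_t align_down(uint64_t x, uint64_t pgsz) { return x & ~(pgsz - 1); }
static uint64_t align_up(uint64_t x, uint64_t pgsz)   { return align_down(x + pgsz - 1, pgsz); }

/* Number of pgsz-sized blocks needed to cover [addr, addr + len). */
static uint64_t num_dma_blocks(uint64_t addr, uint64_t len, uint64_t pgsz)
{
	return (align_up(addr + len, pgsz) - align_down(addr, pgsz)) / pgsz;
}

int main(void)
{
	/* Illustrative values: a 1 MiB registration starting mid-page. */
	uint64_t addr = 0x12345000, len = 1 << 20;

	printf("4K blocks:  %llu\n", (unsigned long long)num_dma_blocks(addr, len, 4096));
	printf("64K blocks: %llu\n", (unsigned long long)num_dma_blocks(addr, len, 65536));
	return 0;
}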
@@ -617,16 +617,15 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 		/* Free the hwq if it already exist, must be a rereg */
 		if (mr->hwq.max_elements)
 			bnxt_qplib_free_hwq(res, &mr->hwq);
-		/* Use system PAGE_SIZE */
 		hwq_attr.res = res;
 		hwq_attr.depth = pages;
-		hwq_attr.stride = buf_pg_size;
+		hwq_attr.stride = sizeof(dma_addr_t);
 		hwq_attr.type = HWQ_TYPE_MR;
 		hwq_attr.sginfo = &sginfo;
 		hwq_attr.sginfo->umem = umem;
 		hwq_attr.sginfo->npages = pages;
-		hwq_attr.sginfo->pgsize = PAGE_SIZE;
-		hwq_attr.sginfo->pgshft = PAGE_SHIFT;
+		hwq_attr.sginfo->pgsize = buf_pg_size;
+		hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
 		rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
 		if (rc) {
 			dev_err(&res->pdev->dev,
......
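The bnxt_qplib_reg_mr() hunk makes the HWQ describe the umem with the MR's actual block size (buf_pg_size) rather than the kernel's PAGE_SIZE, which matters on 64K-page ARM kernels where the two differ. A small userspace sketch of the pgsize/pgshft pairing, using __builtin_ctzll() as a stand-in for the kernel's ilog2(); the numbers are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for ilog2() on a power-of-two value. */
static unsigned int ilog2_u64(uint64_t v)
{
	return (unsigned int)__builtin_ctzll(v);
}

int main(void)
{
	/* Hypothetical values: a 4 KiB MR block size on a 64 KiB-page kernel. */
	uint64_t buf_pg_size = 4096;
	uint64_t kernel_page_size = 65536;

	printf("old: pgsize=%llu pgshft=%u (kernel page)\n",
	       (unsigned long long)kernel_page_size, ilog2_u64(kernel_page_size));
	printf("new: pgsize=%llu pgshft=%u (MR block size)\n",
	       (unsigned long long)buf_pg_size, ilog2_u64(buf_pg_size));
	return 0;
}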
@@ -1403,7 +1403,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev,
  */
 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
 {
-	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
+	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
 	struct scatterlist *sgl;
 	int sg_dma_cnt, err;
......
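The efa change sizes the indirect PBL by the chunk payload unit instead of the CPU page size; on a 64K-page kernel, dividing by PAGE_SIZE yields far too few units when the device-side unit is smaller. A quick arithmetic sketch of that difference; the chunk payload value here is an assumed placeholder, not taken from the driver:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative numbers only: a 1 MiB PBL buffer. */
	unsigned long pbl_buf_size_in_bytes = 1UL << 20;
	unsigned long page_size_64k = 64UL * 1024;   /* CPU PAGE_SIZE on a 64K ARM kernel */
	unsigned long chunk_payload = 4UL * 1024;    /* hypothetical chunk payload unit */

	printf("count by PAGE_SIZE (old):          %lu\n",
	       DIV_ROUND_UP(pbl_buf_size_in_bytes, page_size_64k));
	printf("count by chunk payload size (new): %lu\n",
	       DIV_ROUND_UP(pbl_buf_size_in_bytes, chunk_payload));
	return 0;
}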
@@ -4583,11 +4583,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	mtu = ib_mtu_enum_to_int(ib_mtu);
 	if (WARN_ON(mtu <= 0))
 		return -EINVAL;
-#define MAX_LP_MSG_LEN 16384
-	/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
-	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-	if (WARN_ON(lp_pktn_ini >= 0xF))
-		return -EINVAL;
+#define MIN_LP_MSG_LEN 1024
+	/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
+	lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
 
 	if (attr_mask & IB_QP_PATH_MTU) {
 		hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -5012,7 +5010,6 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
 {
 #define QP_ACK_TIMEOUT_MAX_HIP08 20
-#define QP_ACK_TIMEOUT_OFFSET 10
 #define QP_ACK_TIMEOUT_MAX 31
 
 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
@@ -5021,7 +5018,7 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
 			   "local ACK timeout shall be 0 to 20.\n");
 			return false;
 		}
-		*timeout += QP_ACK_TIMEOUT_OFFSET;
+		*timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
 	} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
 		if (*timeout > QP_ACK_TIMEOUT_MAX) {
 			ibdev_warn(&hr_dev->ib_dev,
@@ -5307,6 +5304,18 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
 	return ret;
 }
 
+static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
+			      struct hns_roce_v2_qp_context *context)
+{
+	u8 timeout;
+
+	timeout = (u8)hr_reg_read(context, QPC_AT);
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+		timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
+
+	return timeout;
+}
+
 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 				int qp_attr_mask,
 				struct ib_qp_init_attr *qp_init_attr)
@@ -5384,7 +5393,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
 	qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
-	qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
+	qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
 	qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
 	qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
......
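In the modify_qp_init_to_rtr() hunk above, the loopback-slice exponent changes from "fit within 16 KB" to "cover at least 1024 bytes". A standalone C sketch comparing the two formulas across the valid IB path MTUs; ilog2 is approximated with __builtin_ctz, which is valid here because both arguments are powers of two:

#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2() on power-of-two inputs. */
static unsigned int ilog2_u32(unsigned int v)
{
	return (unsigned int)__builtin_ctz(v);
}

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int mtus[] = { 256, 512, 1024, 2048, 4096 };

	for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		unsigned int mtu = mtus[i];
		unsigned int old_ini = ilog2_u32(16384 / mtu);              /* MAX_LP_MSG_LEN / mtu */
		unsigned int new_ini = ilog2_u32(max_u32(mtu, 1024) / mtu); /* MIN_LP_MSG_LEN rule */

		printf("mtu %4u: old lp_pktn_ini=%u (slice %6u B), new lp_pktn_ini=%u (slice %5u B)\n",
		       mtu, old_ini, mtu << old_ini, new_ini, mtu << new_ini);
	}
	return 0;
}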
@@ -44,6 +44,8 @@
 #define HNS_ROCE_V2_MAX_XRCD_NUM		0x1000000
 #define HNS_ROCE_V2_RSV_XRCD_NUM		0
 
+#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08	10
+
 #define HNS_ROCE_V3_SCCC_SZ			64
 #define HNS_ROCE_V3_GMV_ENTRY_SZ		32
......
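The HIP08 path adds a 10-unit offset to the local ACK timeout when programming the QPC, so the query path must subtract it again to report what the user actually set; that is what the new get_qp_timeout_attr() does. A tiny round-trip sketch using the same offset; the helper names are hypothetical, only the offset value comes from the hunks above:

#include <stdio.h>

#define QP_ACK_TIMEOUT_OFS_HIP08 10 /* value from HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 */

/* What the modify-QP path stores in the QPC on HIP08. */
static unsigned int program_timeout(unsigned int user_timeout)
{
	return user_timeout + QP_ACK_TIMEOUT_OFS_HIP08;
}

/* What the query-QP path now reports back, mirroring get_qp_timeout_attr(). */
static unsigned int query_timeout(unsigned int qpc_timeout)
{
	return qpc_timeout - QP_ACK_TIMEOUT_OFS_HIP08;
}

int main(void)
{
	unsigned int user_timeout = 14; /* 0..20 is the valid HIP08 range */
	unsigned int qpc = program_timeout(user_timeout);

	printf("user set %u, QPC holds %u, query reports %u\n",
	       user_timeout, qpc, query_timeout(qpc));
	return 0;
}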
@@ -33,6 +33,7 @@
 #include <linux/vmalloc.h>
 #include <rdma/ib_umem.h>
+#include <linux/math.h>
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
@@ -909,6 +910,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
 	return page_cnt;
 }
 
+static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
+{
+	return int_pow(ba_per_bt, hopnum - 1);
+}
+
+static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
+				      struct hns_roce_mtr *mtr,
+				      unsigned int pg_shift)
+{
+	unsigned long cap = hr_dev->caps.page_size_cap;
+	struct hns_roce_buf_region *re;
+	unsigned int pgs_per_l1ba;
+	unsigned int ba_per_bt;
+	unsigned int ba_num;
+	int i;
+
+	for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
+		if (!(BIT(pg_shift) & cap))
+			continue;
+
+		ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
+		ba_num = 0;
+		for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+			re = &mtr->hem_cfg.region[i];
+			if (re->hopnum == 0)
+				continue;
+
+			pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
+			ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
+		}
+
+		if (ba_num <= ba_per_bt)
+			return pg_shift;
+	}
+
+	return 0;
+}
+
 static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 			 unsigned int ba_page_shift)
 {
@@ -917,6 +956,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	hns_roce_hem_list_init(&mtr->hem_list);
 	if (!cfg->is_direct) {
+		ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
+		if (!ba_page_shift)
+			return -ERANGE;
+
 		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
 						cfg->region, cfg->region_count,
 						ba_page_shift);
......
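cal_best_bt_pg_sz() above walks the supported page sizes upward from the requested shift and picks the first base-address-table (BAT) page size whose entry capacity covers every multi-hop region, so the driver stops exhausting table resources. A userspace sketch of the same selection loop, with hard-coded capabilities and regions standing in for hr_dev->caps and mtr->hem_cfg; all concrete numbers are assumptions for illustration:

#include <stdio.h>

#define BA_BYTE_LEN 8 /* each base address entry is 8 bytes, as in the driver */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct region {
	unsigned int hopnum; /* addressing hops for this region */
	unsigned int count;  /* number of buffer pages in this region */
};

static unsigned long long int_pow_u64(unsigned long long base, unsigned int exp)
{
	unsigned long long r = 1;

	while (exp--)
		r *= base;
	return r;
}

/* Pick the smallest BAT page shift (>= pg_shift) whose BAs cover all regions. */
static unsigned int best_bt_pg_shift(unsigned long page_size_cap,
				     const struct region *regions, int nregions,
				     unsigned int pg_shift)
{
	for (; pg_shift < 8 * sizeof(page_size_cap); pg_shift++) {
		unsigned int ba_per_bt, ba_num = 0;

		if (!(page_size_cap & (1UL << pg_shift)))
			continue;

		ba_per_bt = (1U << pg_shift) / BA_BYTE_LEN;
		for (int i = 0; i < nregions; i++) {
			if (regions[i].hopnum == 0)
				continue;
			ba_num += DIV_ROUND_UP(regions[i].count,
					       int_pow_u64(ba_per_bt, regions[i].hopnum - 1));
		}
		if (ba_num <= ba_per_bt)
			return pg_shift;
	}
	return 0; /* nothing fits: the caller fails with -ERANGE */
}

int main(void)
{
	/* Hypothetical setup: 4K, 16K and 64K BAT pages supported, two 2-hop regions. */
	unsigned long cap = (1UL << 12) | (1UL << 14) | (1UL << 16);
	struct region regions[] = { { 2, 300000 }, { 2, 100000 } };

	printf("chosen BAT page shift: %u\n",
	       best_bt_pg_shift(cap, regions, 2, 12));
	return 0;
}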
@@ -522,11 +522,6 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	if (!iwqp->user_mode)
 		cancel_delayed_work_sync(&iwqp->dwork_flush);
 
-	irdma_qp_rem_ref(&iwqp->ibqp);
-	wait_for_completion(&iwqp->free_qp);
-	irdma_free_lsmm_rsrc(iwqp);
-	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
-
 	if (!iwqp->user_mode) {
 		if (iwqp->iwscq) {
 			irdma_clean_cqes(iwqp, iwqp->iwscq);
@@ -534,6 +529,12 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
 		}
 	}
+
+	irdma_qp_rem_ref(&iwqp->ibqp);
+	wait_for_completion(&iwqp->free_qp);
+	irdma_free_lsmm_rsrc(iwqp);
+	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+
 	irdma_remove_push_mmap_entries(iwqp);
 	irdma_free_qp_rsrc(iwqp);
@@ -3291,6 +3292,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
 			break;
 		case IB_WR_LOCAL_INV:
 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
+			info.local_fence = info.read_fence;
 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
 			break;
......
@@ -115,15 +115,16 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
 void retransmit_timer(struct timer_list *t)
 {
 	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+	unsigned long flags;
 
 	rxe_dbg_qp(qp, "retransmit timer fired\n");
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (qp->valid) {
 		qp->comp.timeout = 1;
 		rxe_sched_task(&qp->comp.task);
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
@@ -481,11 +482,13 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 static void comp_check_sq_drain_done(struct rxe_qp *qp)
 {
-	spin_lock_bh(&qp->state_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
 			qp->attr.sq_draining = 0;
-			spin_unlock_bh(&qp->state_lock);
+			spin_unlock_irqrestore(&qp->state_lock, flags);
 
 			if (qp->ibqp.event_handler) {
 				struct ib_event ev;
@@ -499,7 +502,7 @@ static void comp_check_sq_drain_done(struct rxe_qp *qp)
 			return;
 		}
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static inline enum comp_state complete_ack(struct rxe_qp *qp,
@@ -625,13 +628,15 @@ static void free_pkt(struct rxe_pkt_info *pkt)
  */
 static void reset_retry_timer(struct rxe_qp *qp)
 {
+	unsigned long flags;
+
 	if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
-		spin_lock_bh(&qp->state_lock);
+		spin_lock_irqsave(&qp->state_lock, flags);
 		if (qp_state(qp) >= IB_QPS_RTS &&
 		    psn_compare(qp->req.psn, qp->comp.psn) > 0)
 			mod_timer(&qp->retrans_timer,
 				  jiffies + qp->qp_timeout_jiffies);
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 	}
 }
@@ -643,18 +648,19 @@ int rxe_completer(struct rxe_qp *qp)
 	struct rxe_pkt_info *pkt = NULL;
 	enum comp_state state;
 	int ret;
+	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
 			  qp_state(qp) == IB_QPS_RESET) {
 		bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
 		drain_resp_pkts(qp);
 		flush_send_queue(qp, notify);
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		goto exit;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	if (qp->comp.timeout) {
 		qp->comp.timeout_retry = 1;
......
@@ -412,15 +412,16 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	int err;
 	int is_request = pkt->mask & RXE_REQ_MASK;
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
 	    (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
 		goto drop;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	rxe_icrc_generate(skb, pkt);
......
@@ -300,6 +300,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 	struct rxe_cq *rcq = to_rcq(init->recv_cq);
 	struct rxe_cq *scq = to_rcq(init->send_cq);
 	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+	unsigned long flags;
 
 	rxe_get(pd);
 	rxe_get(rcq);
@@ -325,10 +326,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 	if (err)
 		goto err2;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	qp->attr.qp_state = IB_QPS_RESET;
 	qp->valid = 1;
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	return 0;
@@ -492,24 +493,28 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 /* move the qp to the error state */
 void rxe_qp_error(struct rxe_qp *qp)
 {
-	spin_lock_bh(&qp->state_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->state_lock, flags);
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
 	rxe_sched_task(&qp->resp.task);
 	rxe_sched_task(&qp->comp.task);
 	rxe_sched_task(&qp->req.task);
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
 		       int mask)
 {
-	spin_lock_bh(&qp->state_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->state_lock, flags);
 	qp->attr.sq_draining = 1;
 	rxe_sched_task(&qp->comp.task);
 	rxe_sched_task(&qp->req.task);
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 /* caller should hold qp->state_lock */
@@ -555,14 +560,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		qp->attr.cur_qp_state = attr->qp_state;
 
 	if (mask & IB_QP_STATE) {
-		spin_lock_bh(&qp->state_lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&qp->state_lock, flags);
 		err = __qp_chk_state(qp, attr, mask);
 		if (!err) {
 			qp->attr.qp_state = attr->qp_state;
 			rxe_dbg_qp(qp, "state -> %s\n",
 					qps2str[attr->qp_state]);
 		}
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 
 		if (err)
 			return err;
@@ -688,6 +695,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 /* called by the query qp verb */
 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
 {
+	unsigned long flags;
+
 	*attr = qp->attr;
 
 	attr->rq_psn = qp->resp.psn;
@@ -708,12 +717,13 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
 		/* Applications that get this state typically spin on it.
 		 * Yield the processor
 		 */
-		spin_lock_bh(&qp->state_lock);
+		spin_lock_irqsave(&qp->state_lock, flags);
 		if (qp->attr.sq_draining) {
-			spin_unlock_bh(&qp->state_lock);
+			spin_unlock_irqrestore(&qp->state_lock, flags);
 			cond_resched();
+		} else {
+			spin_unlock_irqrestore(&qp->state_lock, flags);
 		}
-		spin_unlock_bh(&qp->state_lock);
 	}
 
 	return 0;
 }
@@ -736,10 +746,11 @@ int rxe_qp_chk_destroy(struct rxe_qp *qp)
 static void rxe_qp_do_cleanup(struct work_struct *work)
 {
 	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	qp->valid = 0;
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 	qp->qp_timeout_jiffies = 0;
 
 	if (qp_type(qp) == IB_QPT_RC) {
......
@@ -14,6 +14,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 			    struct rxe_qp *qp)
 {
 	unsigned int pkt_type;
+	unsigned long flags;
 
 	if (unlikely(!qp->valid))
 		return -EINVAL;
@@ -38,19 +39,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (pkt->mask & RXE_REQ_MASK) {
 		if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
-			spin_unlock_bh(&qp->state_lock);
+			spin_unlock_irqrestore(&qp->state_lock, flags);
 			return -EINVAL;
 		}
 	} else {
 		if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-			spin_unlock_bh(&qp->state_lock);
+			spin_unlock_irqrestore(&qp->state_lock, flags);
 			return -EINVAL;
 		}
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	return 0;
 }
......
@@ -99,17 +99,18 @@ static void req_retry(struct rxe_qp *qp)
 void rnr_nak_timer(struct timer_list *t)
 {
 	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+	unsigned long flags;
 
 	rxe_dbg_qp(qp, "nak timer fired\n");
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (qp->valid) {
 		/* request a send queue retry */
 		qp->req.need_retry = 1;
 		qp->req.wait_for_rnr_timer = 0;
 		rxe_sched_task(&qp->req.task);
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void req_check_sq_drain_done(struct rxe_qp *qp)
@@ -118,8 +119,9 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
 	unsigned int index;
 	unsigned int cons;
 	struct rxe_send_wqe *wqe;
+	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (qp_state(qp) == IB_QPS_SQD) {
 		q = qp->sq.queue;
 		index = qp->req.wqe_index;
@@ -140,7 +142,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
 				break;
 
 			qp->attr.sq_draining = 0;
-			spin_unlock_bh(&qp->state_lock);
+			spin_unlock_irqrestore(&qp->state_lock, flags);
 
 			if (qp->ibqp.event_handler) {
 				struct ib_event ev;
@@ -154,7 +156,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
 			return;
 		} while (0);
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
@@ -173,6 +175,7 @@ static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 {
 	struct rxe_send_wqe *wqe;
+	unsigned long flags;
 
 	req_check_sq_drain_done(qp);
@@ -180,13 +183,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	if (wqe == NULL)
 		return NULL;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
 		     (wqe->state != wqe_state_processing))) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		return NULL;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
 	return wqe;
@@ -676,16 +679,17 @@ int rxe_requester(struct rxe_qp *qp)
 	struct rxe_queue *q = qp->sq.queue;
 	struct rxe_ah *ah;
 	struct rxe_av *av;
+	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (unlikely(!qp->valid)) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		goto exit;
 	}
 
 	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
 		wqe = __req_next_wqe(qp);
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		if (wqe)
 			goto err;
 		else
@@ -700,10 +704,10 @@ int rxe_requester(struct rxe_qp *qp)
 		qp->req.wait_psn = 0;
 		qp->req.need_retry = 0;
 		qp->req.wait_for_rnr_timer = 0;
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		goto exit;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	/* we come here if the retransmit timer has fired
 	 * or if the rnr timer has fired. If the retransmit
......
@@ -1047,6 +1047,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 	struct ib_uverbs_wc *uwc = &cqe.uibwc;
 	struct rxe_recv_wqe *wqe = qp->resp.wqe;
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	unsigned long flags;
 
 	if (!wqe)
 		goto finish;
@@ -1137,12 +1138,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 		return RESPST_ERR_CQ_OVERFLOW;
 
 finish:
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		return RESPST_CHK_RESOURCE;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	if (unlikely(!pkt))
 		return RESPST_DONE;
@@ -1468,18 +1469,19 @@ int rxe_responder(struct rxe_qp *qp)
 	enum resp_states state;
 	struct rxe_pkt_info *pkt = NULL;
 	int ret;
+	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
 			  qp_state(qp) == IB_QPS_RESET) {
 		bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
 		drain_req_pkts(qp);
 		flush_recv_queue(qp, notify);
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		goto exit;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
......
@@ -904,10 +904,10 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
 	if (!err)
 		rxe_sched_task(&qp->req.task);
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (qp_state(qp) == IB_QPS_ERR)
 		rxe_sched_task(&qp->comp.task);
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	return err;
 }
@@ -917,22 +917,23 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 {
 	struct rxe_qp *qp = to_rqp(ibqp);
 	int err;
+	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	/* caller has already called destroy_qp */
 	if (WARN_ON_ONCE(!qp->valid)) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		rxe_err_qp(qp, "qp has been destroyed");
 		return -EINVAL;
 	}
 
 	if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		*bad_wr = wr;
 		rxe_err_qp(qp, "qp not ready to send");
 		return -EINVAL;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
@@ -1008,22 +1009,22 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	struct rxe_rq *rq = &qp->rq;
 	unsigned long flags;
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	/* caller has already called destroy_qp */
 	if (WARN_ON_ONCE(!qp->valid)) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		rxe_err_qp(qp, "qp has been destroyed");
 		return -EINVAL;
 	}
 
 	/* see C10-97.2.1 */
 	if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
-		spin_unlock_bh(&qp->state_lock);
+		spin_unlock_irqrestore(&qp->state_lock, flags);
 		*bad_wr = wr;
 		rxe_dbg_qp(qp, "qp not ready to post recv");
 		return -EINVAL;
 	}
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	if (unlikely(qp->srq)) {
 		*bad_wr = wr;
@@ -1044,10 +1045,10 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
-	spin_lock_bh(&qp->state_lock);
+	spin_lock_irqsave(&qp->state_lock, flags);
 	if (qp_state(qp) == IB_QPS_ERR)
 		rxe_sched_task(&qp->resp.task);
-	spin_unlock_bh(&qp->state_lock);
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
 	return err;
 }
......