Commit 5e7c1b75 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "First RDMA subsystem updates for 5.5-rc. A very small set of fixes,
  most people seem to still be recovering from December!

  Five small driver fixes:

   - Fix error flow with MR allocation in bnxt_re

   - An errata work around for bnxt_re

   - Misuse of the workqueue API in hfi1

   - Protocol error in hfi1

   - Regression in 5.5 related to the mmap rework with i40iw"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  i40iw: Remove setting of VMA private data and use rdma_user_mmap_io
  IB/hfi1: Adjust flow PSN with the correct resync_psn
  IB/hfi1: Don't cancel unused work item
  RDMA/bnxt_re: Fix Send Work Entry state check while polling completions
  RDMA/bnxt_re: Avoid freeing MR resources if dereg fails
parents 4a3033ef 9554de39
@@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 	int rc;
 
 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-	if (rc)
+	if (rc) {
 		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+		return rc;
+	}
 
 	if (mr->pages) {
 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
...
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 			/* Add qp to flush list of the CQ */
 			bnxt_qplib_add_flush_qp(qp);
 		} else {
-			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
-				/* Before we complete, do WA 9060 */
-				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
-					      cqe_sq_cons)) {
-					*lib_qp = qp;
-					goto out;
-				}
+			/* Before we complete, do WA 9060 */
+			if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+				      cqe_sq_cons)) {
+				*lib_qp = qp;
+				goto out;
+			}
+			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
 				cqe->status = CQ_REQ_STATUS_OK;
 				cqe++;
 				(*budget)--;
...
@@ -81,6 +81,8 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
 void iowait_cancel_work(struct iowait *w)
 {
 	cancel_work_sync(&iowait_get_ib_work(w)->iowork);
-	cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+	/* Make sure that the iowork for TID RDMA is used */
+	if (iowait_get_tid_work(w)->iowork.func)
+		cancel_work_sync(&iowait_get_tid_work(w)->iowork);
 }
...
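For context on the guard added above: INIT_WORK() is what fills in a work_struct's func pointer, so a work item that was never initialized (in an otherwise zeroed structure) still has func == NULL, and handing it to cancel_work_sync() is a misuse of the workqueue API. Below is a minimal kernel-style sketch of the same pattern; the demo_* names are hypothetical and not from hfi1.

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct base_work;	/* always set up with INIT_WORK() */
	struct work_struct opt_work;	/* set up only on some configurations */
};

static void demo_teardown(struct demo_dev *d)
{
	cancel_work_sync(&d->base_work);
	/*
	 * INIT_WORK() populates work->func; if opt_work was never
	 * initialized, func is still NULL and cancel_work_sync()
	 * must be skipped.
	 */
	if (d->opt_work.func)
		cancel_work_sync(&d->opt_work);
}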
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 			 */
 			fpsn = full_flow_psn(flow, flow->flow_state.spsn);
 			req->r_ack_psn = psn;
+			/*
+			 * If resync_psn points to the last flow PSN for a
+			 * segment and the new segment (likely from a new
+			 * request) starts with a new generation number, we
+			 * need to adjust resync_psn accordingly.
+			 */
+			if (flow->flow_state.generation !=
+			    (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+				resync_psn = mask_psn(fpsn - 1);
 			flow->resync_npkts +=
 				delta_psn(mask_psn(resync_psn + 1), fpsn);
 			/*
...
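To make the adjustment above concrete, here is a small standalone sketch of the PSN arithmetic. The 11-bit generation/sequence split and the 31-bit mask are assumptions for illustration (standing in for HFI1_KDETH_BTH_SEQ_SHIFT and mask_psn()), and the unsigned masked delta is only an analogue of delta_psn():

#include <stdint.h>
#include <stdio.h>

#define SEQ_SHIFT 11u          /* assumed generation/sequence split */
#define PSN_MASK  0x7fffffffu  /* assumed PSN mask */

int main(void)
{
	/* resync_psn still points into a generation-3 segment... */
	uint32_t resync_psn = (3u << SEQ_SHIFT) | 100u;
	/* ...but the flow has moved on: new segment, generation 4, seq 0 */
	uint32_t generation = 4u;
	uint32_t fpsn = 4u << SEQ_SHIFT;

	uint32_t stale = ((resync_psn + 1) - fpsn) & PSN_MASK;

	/* the check from the patch: stale generation => re-anchor resync_psn */
	if (generation != (resync_psn >> SEQ_SHIFT))
		resync_psn = (fpsn - 1) & PSN_MASK;

	uint32_t fixed = ((resync_psn + 1) - fpsn) & PSN_MASK;

	printf("npkts delta: stale=%u, fixed=%u\n", stale, fixed);
	return 0;
}

With the re-anchor, the delta for the already-started segment is 0; without it, resync_npkts would be credited with a bogus count spanning the generation boundary.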
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct i40iw_ucontext *ucontext;
-	u64 db_addr_offset;
-	u64 push_offset;
+	u64 db_addr_offset, push_offset, pfn;
 
 	ucontext = to_ucontext(context);
 	if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		vma->vm_private_data = ucontext;
 	} else {
 		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	}
 
-	if (io_remap_pfn_range(vma, vma->vm_start,
-			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-			       PAGE_SIZE, vma->vm_page_prot))
-		return -EAGAIN;
-
-	return 0;
+	pfn = vma->vm_pgoff +
+	      (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+	       PAGE_SHIFT);
+
+	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+				 vma->vm_page_prot, NULL);
 }
 
 /**
...