Commit d205a06a authored by Kaike Wan, committed by Jason Gunthorpe

IB/rdmavt: Rename check_send_wqe as setup_wqe

The driver-provided function check_send_wqe allows the hardware driver to
check and set up the incoming send wqe before it is inserted into the swqe
ring. This patch renames it to setup_wqe to better reflect its usage. In
addition, this function is now called only after all setup of the swqe is
complete in rdmavt.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 8c31c918
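
For context, a minimal sketch of what the renamed hook could look like in an rdmavt-based driver. Only the hook signature and the rvt_qp/rvt_swqe fields come from the rdmavt headers; the function name toy_setup_wqe and the pmtu-based scheduling policy are illustrative assumptions, not part of this patch.

#include <rdma/rdmavt_qp.h>	/* struct rvt_qp, struct rvt_swqe */

/* Illustrative only: a toy setup_wqe hook for an rdmavt-based driver. */
static int toy_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
			 bool *call_send)
{
	/*
	 * rdmavt calls this after the swqe has been fully built
	 * (including PSN assignment for RC/UC QPs) but before it is
	 * inserted into the ring.  A real driver would validate the
	 * request here (for example the AH of a UD QP) and may also set
	 * up private per-request state; returning -EINVAL rejects the
	 * work request.
	 */

	/* Hypothetical policy: hand large sends to the driver's send
	 * engine instead of posting them from the caller's context.
	 */
	if (wqe->length > qp->pmtu)
		*call_send = false;

	return 0;
}
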
@@ -282,16 +282,21 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
 }
 
 /**
- * hfi1_check_send_wqe - validate wqe
+ * hfi1_setup_wqe - set up the wqe
  * @qp - The qp
  * @wqe - The built wqe
  * @call_send - Determine if the send should be posted or scheduled.
  *
+ * Perform setup of the wqe. This is called
+ * prior to inserting the wqe into the ring but after
+ * the wqe has been setup by RDMAVT. This function
+ * allows the driver the opportunity to perform
+ * validation and additional setup of the wqe.
+ *
  * Returns 0 on success, -EINVAL on failure
  *
  */
-int hfi1_check_send_wqe(struct rvt_qp *qp,
-			struct rvt_swqe *wqe, bool *call_send)
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
 {
 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
 	struct rvt_ah *ah;
...

@@ -1937,7 +1937,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
 	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
 	dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
-	dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+	dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
 	dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
 		hfi1_comp_vect_mappings_lookup;
...

@@ -343,7 +343,7 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
 void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
 		    int attr_mask, struct ib_udata *udata);
 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
-int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
-			bool *call_send);
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+		   bool *call_send);
 extern const u32 rc_only_opcode;
...

@@ -1588,7 +1588,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
 	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
 	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
-	dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+	dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
 	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
 	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
...

@@ -1823,13 +1823,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 		wqe->wr.num_sge = j;
 	}
 
-	/* general part of wqe valid - allow for driver checks */
-	if (rdi->driver_f.check_send_wqe) {
-		ret = rdi->driver_f.check_send_wqe(qp, wqe, call_send);
-		if (ret < 0)
-			goto bail_inval_free;
-	}
-
+	/*
+	 * Calculate and set SWQE PSN values prior to handing it off
+	 * to the driver's check routine. This give the driver the
+	 * opportunity to adjust PSN values based on internal checks.
+	 */
 	log_pmtu = qp->log_pmtu;
 	if (qp->ibqp.qp_type != IB_QPT_UC &&
 	    qp->ibqp.qp_type != IB_QPT_RC) {
...

@@ -1854,8 +1852,18 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 				(wqe->length ?
 					((wqe->length - 1) >> log_pmtu) :
 					0);
-		qp->s_next_psn = wqe->lpsn + 1;
 	}
+
+	/* general part of wqe valid - allow for driver checks */
+	if (rdi->driver_f.setup_wqe) {
+		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
+		if (ret < 0)
+			goto bail_inval_free_ref;
+	}
+
+	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
+		qp->s_next_psn = wqe->lpsn + 1;
+
 	if (unlikely(reserved_op)) {
 		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
 		rvt_qp_wqe_reserve(qp, wqe);
...

@@ -1869,6 +1877,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	return 0;
 
+bail_inval_free_ref:
+	if (qp->ibqp.qp_type != IB_QPT_UC &&
+	    qp->ibqp.qp_type != IB_QPT_RC)
+		atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
 bail_inval_free:
 	/* release mr holds */
 	while (j) {
...

@@ -215,12 +215,13 @@ struct rvt_driver_provided {
 	void (*schedule_send_no_lock)(struct rvt_qp *qp);
 
 	/*
-	 * Validate the wqe. This needs to be done prior to inserting the
-	 * wqe into the ring, but after the wqe has been set up. Allow for
-	 * driver specific work request checking by providing a callback.
-	 * call_send indicates if the wqe should be posted or scheduled.
+	 * Driver specific work request setup and checking.
+	 * This function is allowed to perform any setup, checks, or
+	 * adjustments required to the SWQE in order to be usable by
+	 * underlying protocols. This includes private data structure
+	 * allocations.
 	 */
-	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
-			      bool *call_send);
+	int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
+			 bool *call_send);
 
 	/*
...
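
As a usage note, a hedged sketch of how such a hook gets plugged into the driver_f table at device registration time, mirroring the hfi1 and qib hunks above; toy_register_hooks is a hypothetical name, and toy_setup_wqe refers to the sketch after the commit message.

#include <rdma/rdma_vt.h>	/* struct rvt_dev_info, struct rvt_driver_provided */

/*
 * Illustrative registration: rdmavt invokes driver_f.setup_wqe (when
 * set) from rvt_post_one_wr() for each posted send work request, after
 * the generic swqe setup shown in the hunks above has completed.
 */
static void toy_register_hooks(struct rvt_dev_info *rdi)
{
	rdi->driver_f.setup_wqe = toy_setup_wqe;
}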