Commit 4ee97180 authored by Ralph Campbell, committed by Roland Dreier

IB/ipath: Change UD to queue work requests like RC & UC

The code to post UD sends processed work requests at the time
ib_post_send() was called, without using a WQE queue.  That worked only
as long as HW resources were available to send the packet immediately.
This patch changes UD to queue and process work requests the same way
RC and UC do, and shares more code between them.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 210d6ca3
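
For readers skimming the diff, a minimal user-space C sketch of the model this
commit moves UD onto may help: ib_post_send() only appends a software WQE to a
ring buffer and kicks the send tasklet, and the tasklet drains the ring
whenever hardware resources allow. All names below (swqe, qp_post_send,
qp_drain, hw_credits) are illustrative stand-ins, not the driver's API; the
ring corresponds only loosely to qp->s_wq with its head/cur indices.

    #include <stdio.h>
    #include <stddef.h>

    #define QSIZE 8                 /* ring slots; one is sacrificed to detect "full" */

    struct swqe {
            unsigned long wr_id;    /* caller's cookie, reported at completion */
            size_t length;          /* payload length in bytes */
    };

    struct qp {
            struct swqe s_wq[QSIZE];
            unsigned s_head;        /* next free slot; qp_post_send() writes here */
            unsigned s_cur;         /* next WQE the sender will packetize */
    };

    /* Enqueue only -- no packet is built here (unlike the old UD path). */
    static int qp_post_send(struct qp *qp, unsigned long wr_id, size_t len)
    {
            unsigned next = (qp->s_head + 1) % QSIZE;

            if (next == qp->s_cur)
                    return -1;      /* ring full; a driver would return an errno */
            qp->s_wq[qp->s_head].wr_id = wr_id;
            qp->s_wq[qp->s_head].length = len;
            qp->s_head = next;
            /* A real driver would schedule the send tasklet here. */
            return 0;
    }

    /* Stand-in for the send tasklet: drain while "hardware" has room. */
    static void qp_drain(struct qp *qp, int hw_credits)
    {
            while (qp->s_cur != qp->s_head && hw_credits-- > 0) {
                    struct swqe *wqe = &qp->s_wq[qp->s_cur];

                    printf("sent wr_id=%lu len=%zu\n", wqe->wr_id, wqe->length);
                    if (++qp->s_cur >= QSIZE)   /* same wraparound idiom as the diff */
                            qp->s_cur = 0;
            }
    }

    int main(void)
    {
            struct qp qp = { 0 };

            qp_post_send(&qp, 1, 256);
            qp_post_send(&qp, 2, 512);
            qp_drain(&qp, 1);       /* one credit: wr_id=2 stays queued */
            qp_drain(&qp, 8);       /* resources freed: the rest goes out */
            return 0;
    }

The key property is that a post never fails just because the hardware is
momentarily busy; the WQE simply waits in the ring until the drain path gets
credits again.
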
@@ -338,6 +338,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->s_busy = 0;
 	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
+	qp->s_wqe = NULL;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
@@ -751,6 +752,9 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	switch (init_attr->qp_type) {
 	case IB_QPT_UC:
 	case IB_QPT_RC:
+	case IB_QPT_UD:
+	case IB_QPT_SMI:
+	case IB_QPT_GSI:
 		sz = sizeof(struct ipath_sge) *
 			init_attr->cap.max_send_sge +
 			sizeof(struct ipath_swqe);
@@ -759,10 +763,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 			ret = ERR_PTR(-ENOMEM);
 			goto bail;
 		}
-		/* FALLTHROUGH */
-	case IB_QPT_UD:
-	case IB_QPT_SMI:
-	case IB_QPT_GSI:
 		sz = sizeof(*qp);
 		if (init_attr->srq) {
 			struct ipath_srq *srq = to_isrq(init_attr->srq);
@@ -805,8 +805,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	spin_lock_init(&qp->r_rq.lock);
 	atomic_set(&qp->refcount, 0);
 	init_waitqueue_head(&qp->wait);
-	tasklet_init(&qp->s_task, ipath_do_ruc_send,
-		     (unsigned long)qp);
+	tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
 	INIT_LIST_HEAD(&qp->piowait);
 	INIT_LIST_HEAD(&qp->timerwait);
 	qp->state = IB_QPS_RESET;
......
@@ -81,9 +81,8 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
  * Note that we are in the responder's side of the QP context.
  * Note the QP s_lock must be held.
  */
-static int ipath_make_rc_ack(struct ipath_qp *qp,
-			     struct ipath_other_headers *ohdr,
-			     u32 pmtu, u32 *bth0p, u32 *bth2p)
+static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
+			     struct ipath_other_headers *ohdr, u32 pmtu)
 {
 	struct ipath_ack_entry *e;
 	u32 hwords;
@@ -192,8 +191,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 	}
 	qp->s_hdrwords = hwords;
 	qp->s_cur_size = len;
-	*bth0p = bth0 | (1 << 22); /* Set M bit */
-	*bth2p = bth2;
+	ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
 	return 1;

 bail:
@@ -203,32 +201,39 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
 /**
  * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
  * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- * @bth0p: pointer to the BTH opcode word
- * @bth2p: pointer to the BTH PSN word
  *
  * Return 1 if constructed; otherwise, return 0.
- * Note the QP s_lock must be held and interrupts disabled.
  */
-int ipath_make_rc_req(struct ipath_qp *qp,
-		      struct ipath_other_headers *ohdr,
-		      u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_rc_req(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ipath_other_headers *ohdr;
 	struct ipath_sge_state *ss;
 	struct ipath_swqe *wqe;
 	u32 hwords;
 	u32 len;
 	u32 bth0;
 	u32 bth2;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
 	char newreq;
+	unsigned long flags;
+	int ret = 0;
+
+	ohdr = &qp->s_hdr.u.oth;
+	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+		ohdr = &qp->s_hdr.u.l.oth;
+
+	/*
+	 * The lock is needed to synchronize between the sending tasklet,
+	 * the receive interrupt handler, and timeout resends.
+	 */
+	spin_lock_irqsave(&qp->s_lock, flags);

 	/* Sending responses has higher priority over sending requests. */
 	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
 	     qp->s_ack_state != OP(ACKNOWLEDGE)) &&
-	    ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
+	    ipath_make_rc_ack(dev, qp, ohdr, pmtu))
 		goto done;

 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
@@ -560,13 +565,12 @@ int ipath_make_rc_req(struct ipath_qp *qp,
 	qp->s_hdrwords = hwords;
 	qp->s_cur_sge = ss;
 	qp->s_cur_size = len;
-	*bth0p = bth0 | (qp->s_state << 24);
-	*bth2p = bth2;
+	ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
 done:
-	return 1;
-
+	ret = 1;
 bail:
-	return 0;
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+	return ret;
 }

 /**
@@ -627,7 +631,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 	/*
 	 * If we can send the ACK, clear the ACK state.
 	 */
-	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
+	if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
 		dev->n_unicast_xmit++;
 		goto done;
 	}
@@ -757,7 +761,9 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 	wc->vendor_err = 0;
 	wc->byte_len = 0;
 	wc->qp = &qp->ibqp;
+	wc->imm_data = 0;
 	wc->src_qp = qp->remote_qpn;
+	wc->wc_flags = 0;
 	wc->pkey_index = 0;
 	wc->slid = qp->remote_ah_attr.dlid;
 	wc->sl = qp->remote_ah_attr.sl;
@@ -1041,7 +1047,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
 	wc.vendor_err = 0;
 	wc.byte_len = 0;
 	wc.qp = &qp->ibqp;
+	wc.imm_data = 0;
 	wc.src_qp = qp->remote_qpn;
+	wc.wc_flags = 0;
 	wc.pkey_index = 0;
 	wc.slid = qp->remote_ah_attr.dlid;
 	wc.sl = qp->remote_ah_attr.sl;
@@ -1453,6 +1461,19 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 		qp->r_ack_psn = qp->r_psn - 1;
 		goto send_ack;
 	}
+	/*
+	 * Try to send a simple ACK to work around a Mellanox bug
+	 * which doesn't accept a RDMA read response or atomic
+	 * response as an ACK for earlier SENDs or RDMA writes.
+	 */
+	if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
+	    !(qp->s_flags & IPATH_S_ACK_PENDING) &&
+	    qp->s_ack_state == OP(ACKNOWLEDGE)) {
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		qp->r_nak_state = 0;
+		qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+		goto send_ack;
+	}
 	/*
 	 * Resend the RDMA read or atomic op which
 	 * ACKs this duplicate request.
......
This diff is collapsed.
@@ -37,72 +37,40 @@
 /* cut down ridiculously long IB macro names */
 #define OP(x) IB_OPCODE_UC_##x

-static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
-			       struct ib_wc *wc)
-{
-	if (++qp->s_last == qp->s_size)
-		qp->s_last = 0;
-
-	if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-		wc->wr_id = wqe->wr.wr_id;
-		wc->status = IB_WC_SUCCESS;
-		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		wc->vendor_err = 0;
-		wc->byte_len = wqe->length;
-		wc->qp = &qp->ibqp;
-		wc->src_qp = qp->remote_qpn;
-		wc->pkey_index = 0;
-		wc->slid = qp->remote_ah_attr.dlid;
-		wc->sl = qp->remote_ah_attr.sl;
-		wc->dlid_path_bits = 0;
-		wc->port_num = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
-	}
-}
-
 /**
  * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
  * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- * @bth0p: pointer to the BTH opcode word
- * @bth2p: pointer to the BTH PSN word
 *
 * Return 1 if constructed; otherwise, return 0.
- * Note the QP s_lock must be held and interrupts disabled.
 */
-int ipath_make_uc_req(struct ipath_qp *qp,
-		      struct ipath_other_headers *ohdr,
-		      u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_uc_req(struct ipath_qp *qp)
 {
+	struct ipath_other_headers *ohdr;
 	struct ipath_swqe *wqe;
 	u32 hwords;
 	u32 bth0;
 	u32 len;
-	struct ib_wc wc;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	int ret = 0;

 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
 		goto done;

+	ohdr = &qp->s_hdr.u.oth;
+	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+		ohdr = &qp->s_hdr.u.l.oth;
+
 	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
 	hwords = 5;
 	bth0 = 1 << 22; /* Set M bit */

 	/* Get the next send request. */
-	wqe = get_swqe_ptr(qp, qp->s_last);
+	wqe = get_swqe_ptr(qp, qp->s_cur);
+	qp->s_wqe = NULL;
 	switch (qp->s_state) {
 	default:
-		/*
-		 * Signal the completion of the last send
-		 * (if there is one).
-		 */
-		if (qp->s_last != qp->s_tail) {
-			complete_last_send(qp, wqe, &wc);
-			wqe = get_swqe_ptr(qp, qp->s_last);
-		}
-
 		/* Check if send work queue is empty. */
-		if (qp->s_tail == qp->s_head)
+		if (qp->s_cur == qp->s_head)
 			goto done;
 		/*
 		 * Start a new request.
@@ -131,6 +99,9 @@ int ipath_make_uc_req(struct ipath_qp *qp,
 			}
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 				bth0 |= 1 << 23;
+			qp->s_wqe = wqe;
+			if (++qp->s_cur >= qp->s_size)
+				qp->s_cur = 0;
 			break;

 		case IB_WR_RDMA_WRITE:
@@ -157,13 +128,14 @@ int ipath_make_uc_req(struct ipath_qp *qp,
 				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 					bth0 |= 1 << 23;
 			}
+			qp->s_wqe = wqe;
+			if (++qp->s_cur >= qp->s_size)
+				qp->s_cur = 0;
 			break;

 		default:
 			goto done;
 		}
-		if (++qp->s_tail >= qp->s_size)
-			qp->s_tail = 0;
 		break;

 	case OP(SEND_FIRST):
@@ -185,6 +157,9 @@ int ipath_make_uc_req(struct ipath_qp *qp,
 		}
 		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 			bth0 |= 1 << 23;
+		qp->s_wqe = wqe;
+		if (++qp->s_cur >= qp->s_size)
+			qp->s_cur = 0;
 		break;

 	case OP(RDMA_WRITE_FIRST):
@@ -207,18 +182,22 @@ int ipath_make_uc_req(struct ipath_qp *qp,
 			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 				bth0 |= 1 << 23;
 		}
+		qp->s_wqe = wqe;
+		if (++qp->s_cur >= qp->s_size)
+			qp->s_cur = 0;
 		break;
 	}
 	qp->s_len -= len;
 	qp->s_hdrwords = hwords;
 	qp->s_cur_sge = &qp->s_sge;
 	qp->s_cur_size = len;
-	*bth0p = bth0 | (qp->s_state << 24);
-	*bth2p = qp->s_next_psn++ & IPATH_PSN_MASK;
-	return 1;
+	ipath_make_ruc_header(to_idev(qp->ibqp.device),
+			      qp, ohdr, bth0 | (qp->s_state << 24),
+			      qp->s_next_psn++ & IPATH_PSN_MASK);
+	ret = 1;

 done:
-	return 0;
+	return ret;
 }

 /**
......
This diff is collapsed.
This diff is collapsed.
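
The UC hunks above also show the completion model changing: the deleted
complete_last_send() generated the previous WQE's completion while the next
packet was being built, whereas the new code records the current WQE in
qp->s_wqe so the completion can be generated after the packet is actually
sent (see ipath_send_complete() in the header diff below; the collapsed
diffs carry the rest of that change). A rough user-space sketch of this
deferred-completion idea, with hypothetical names throughout:

    #include <stdio.h>

    struct swqe { unsigned long wr_id; int signaled; };

    struct qp {
            struct swqe *s_wqe;     /* WQE for the packet in flight, if any */
    };

    /* Packet builder: remember which WQE this packet finishes. */
    static void build_packet(struct qp *qp, struct swqe *wqe)
    {
            qp->s_wqe = wqe;        /* mirrors "qp->s_wqe = wqe" in the diff */
    }

    /* Called once the send path confirms the packet went out. */
    static void send_complete(struct qp *qp)
    {
            if (qp->s_wqe && qp->s_wqe->signaled)
                    printf("completion for wr_id=%lu\n", qp->s_wqe->wr_id);
            qp->s_wqe = NULL;       /* matches the reset in ipath_reset_qp() */
    }

    int main(void)
    {
            struct qp qp = { 0 };
            struct swqe wqe = { 42, 1 };

            build_packet(&qp, &wqe);
            send_complete(&qp);     /* prints: completion for wr_id=42 */
            return 0;
    }

The design benefit is that a work completion is never reported before the
packet has really been handed to the hardware, which the old
complete-on-next-send scheme could not guarantee.
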
@@ -42,6 +42,8 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_user_verbs.h>

+#include "ipath_kernel.h"
+
 #define IPATH_MAX_RDMA_ATOMIC	4

 #define QPN_MAX	(1 << 24)
@@ -59,6 +61,7 @@
 */
 #define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

+/* AETH NAK opcode values */
 #define IB_RNR_NAK			0x20
 #define IB_NAK_PSN_ERROR		0x60
 #define IB_NAK_INVALID_REQUEST		0x61
@@ -66,6 +69,7 @@
 #define IB_NAK_REMOTE_OPERATIONAL_ERROR	0x63
 #define IB_NAK_INVALID_RD_REQUEST	0x64

+/* Flags for checking QP state (see ib_ipath_state_ops[]) */
 #define IPATH_POST_SEND_OK		0x01
 #define IPATH_POST_RECV_OK		0x02
 #define IPATH_PROCESS_RECV_OK		0x04
@@ -239,7 +243,7 @@ struct ipath_mregion {
 */
 struct ipath_sge {
 	struct ipath_mregion *mr;
-	void *vaddr;		/* current pointer into the segment */
+	void *vaddr;		/* kernel virtual address of segment */
 	u32 sge_length;		/* length of the SGE */
 	u32 length;		/* remaining length of the segment */
 	u16 m;			/* current index: mr->map[m] */
@@ -407,6 +411,7 @@ struct ipath_qp {
 	u32 s_ssn;		/* SSN of tail entry */
 	u32 s_lsn;		/* limit sequence number (credit) */
 	struct ipath_swqe *s_wq;	/* send work queue */
+	struct ipath_swqe *s_wqe;
 	struct ipath_rq r_rq;	/* receive work queue */
 	struct ipath_sge r_sg_list[0];	/* verified SGEs */
 };
@@ -683,8 +688,8 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
-		     u32 *hdr, u32 len, struct ipath_sge_state *ss);
+int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
+		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);

 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
@@ -692,8 +697,6 @@ void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

-int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
-
 void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
@@ -733,6 +736,8 @@ int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
 int ipath_destroy_srq(struct ib_srq *ibsrq);

+void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
+
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

 struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
@@ -782,18 +787,28 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 void ipath_insert_rnr_queue(struct ipath_qp *qp);

+int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
+		   u32 *lengthp, struct ipath_sge_state *ss);
+
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

 u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
 		   struct ib_global_route *grh, u32 hwords, u32 nwords);

-void ipath_do_ruc_send(unsigned long data);
+void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
+			   struct ipath_other_headers *ohdr,
+			   u32 bth0, u32 bth2);
+
+void ipath_do_send(unsigned long data);
+
+void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
+			 enum ib_wc_status status);
+
+int ipath_make_rc_req(struct ipath_qp *qp);

-int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
-		      u32 pmtu, u32 *bth0p, u32 *bth2p);
+int ipath_make_uc_req(struct ipath_qp *qp);

-int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
-		      u32 pmtu, u32 *bth0p, u32 *bth2p);
+int ipath_make_ud_req(struct ipath_qp *qp);

 int ipath_register_ib_device(struct ipath_devdata *);
......