Commit 07b92370 authored by Kaike Wan's avatar Kaike Wan Committed by Doug Ledford

IB/hfi1: Add functions to receive TID RDMA WRITE request

This patch adds the functions to receive a TID RDMA WRITE request. The
request will be stored in the QP's s_ack_queue. This patch also adds
code to handle duplicate TID RDMA WRITE requests and a function to
allocate TID resources for data receiving on the responder side.
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4f9264d1
...@@ -1512,6 +1512,7 @@ static int __init hfi1_mod_init(void) ...@@ -1512,6 +1512,7 @@ static int __init hfi1_mod_init(void)
goto bail_dev; goto bail_dev;
} }
hfi1_compute_tid_rdma_flow_wt();
/* /*
* These must be called before the driver is registered with * These must be called before the driver is registered with
* the PCI subsystem. * the PCI subsystem.
......
...@@ -2411,6 +2411,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2411,6 +2411,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
void *data = packet->payload; void *data = packet->payload;
u32 tlen = packet->tlen; u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp; struct rvt_qp *qp = packet->qp;
struct hfi1_qp_priv *qpriv = qp->priv;
struct hfi1_ibport *ibp = rcd_to_iport(rcd); struct hfi1_ibport *ibp = rcd_to_iport(rcd);
struct ib_other_headers *ohdr = packet->ohdr; struct ib_other_headers *ohdr = packet->ohdr;
u32 opcode = packet->opcode; u32 opcode = packet->opcode;
...@@ -2716,6 +2717,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2716,6 +2717,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_state = opcode; qp->r_state = opcode;
qp->r_nak_state = 0; qp->r_nak_state = 0;
qp->r_head_ack_queue = next; qp->r_head_ack_queue = next;
qpriv->r_tid_alloc = qp->r_head_ack_queue;
/* Schedule the send engine. */ /* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING; qp->s_flags |= RVT_S_RESP_PENDING;
...@@ -2789,6 +2791,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2789,6 +2791,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_state = opcode; qp->r_state = opcode;
qp->r_nak_state = 0; qp->r_nak_state = 0;
qp->r_head_ack_queue = next; qp->r_head_ack_queue = next;
qpriv->r_tid_alloc = qp->r_head_ack_queue;
/* Schedule the send engine. */ /* Schedule the send engine. */
qp->s_flags |= RVT_S_RESP_PENDING; qp->s_flags |= RVT_S_RESP_PENDING;
......
This diff is collapsed.
...@@ -26,7 +26,9 @@ ...@@ -26,7 +26,9 @@
* *
* HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock * HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock
*/ */
/* BIT(4) reserved for RVT_S_ACK_PENDING. */
#define HFI1_S_TID_WAIT_INTERLCK BIT(5) #define HFI1_S_TID_WAIT_INTERLCK BIT(5)
#define HFI1_R_TID_SW_PSN BIT(19)
/* /*
* Unlike regular IB RDMA VERBS, which do not require an entry * Unlike regular IB RDMA VERBS, which do not require an entry
...@@ -89,10 +91,12 @@ struct tid_rdma_request { ...@@ -89,10 +91,12 @@ struct tid_rdma_request {
} e; } e;
struct tid_rdma_flow *flows; /* array of tid flows */ struct tid_rdma_flow *flows; /* array of tid flows */
struct rvt_sge_state ss; /* SGE state for TID RDMA requests */
u16 n_flows; /* size of the flow buffer window */ u16 n_flows; /* size of the flow buffer window */
u16 setup_head; /* flow index we are setting up */ u16 setup_head; /* flow index we are setting up */
u16 clear_tail; /* flow index we are clearing */ u16 clear_tail; /* flow index we are clearing */
u16 flow_idx; /* flow index most recently set up */ u16 flow_idx; /* flow index most recently set up */
u16 acked_tail;
u32 seg_len; u32 seg_len;
u32 total_len; u32 total_len;
...@@ -103,6 +107,7 @@ struct tid_rdma_request { ...@@ -103,6 +107,7 @@ struct tid_rdma_request {
u32 cur_seg; /* index of current segment */ u32 cur_seg; /* index of current segment */
u32 comp_seg; /* index of last completed segment */ u32 comp_seg; /* index of last completed segment */
u32 ack_seg; /* index of last ack'ed segment */ u32 ack_seg; /* index of last ack'ed segment */
u32 alloc_seg; /* index of next segment to be allocated */
u32 isge; /* index of "current" sge */ u32 isge; /* index of "current" sge */
u32 ack_pending; /* num acks pending for this request */ u32 ack_pending; /* num acks pending for this request */
...@@ -174,6 +179,12 @@ struct tid_rdma_flow { ...@@ -174,6 +179,12 @@ struct tid_rdma_flow {
u32 tid_entry[TID_RDMA_MAX_PAGES]; u32 tid_entry[TID_RDMA_MAX_PAGES];
}; };
/*
 * Per-QP RNR NAK state machine for the TID RDMA responder side.
 * NOTE(review): exact transition points are not visible in this hunk —
 * presumably the state advances INIT -> SEND -> SENT as an RNR NAK is
 * queued and then put on the wire; confirm against the users of
 * qpriv->rnr_nak_state elsewhere in the driver.
 */
enum tid_rnr_nak_state {
	TID_RNR_NAK_INIT = 0,	/* no RNR NAK outstanding */
	TID_RNR_NAK_SEND,	/* an RNR NAK needs to be sent */
	TID_RNR_NAK_SENT,	/* the RNR NAK has been sent */
};
bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data); bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data); bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data); bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
...@@ -247,4 +258,9 @@ static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp, ...@@ -247,4 +258,9 @@ static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, struct ib_other_headers *ohdr,
u32 *bth1, u32 *bth2, u32 *len); u32 *bth1, u32 *bth2, u32 *len);
void hfi1_compute_tid_rdma_flow_wt(void);
void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);
#endif /* HFI1_TID_RDMA_H */ #endif /* HFI1_TID_RDMA_H */
...@@ -172,7 +172,15 @@ struct hfi1_qp_priv { ...@@ -172,7 +172,15 @@ struct hfi1_qp_priv {
unsigned long tid_timer_timeout_jiffies; unsigned long tid_timer_timeout_jiffies;
/* variables for the TID RDMA SE state machine */ /* variables for the TID RDMA SE state machine */
u8 rnr_nak_state; /* RNR NAK state */
u32 s_flags; u32 s_flags;
u32 r_tid_head; /* Most recently added TID RDMA request */
u32 r_tid_tail; /* the last completed TID RDMA request */
u32 r_tid_ack; /* the TID RDMA request to be ACK'ed */
u32 r_tid_alloc; /* Request for which we are allocating resources */
u32 pending_tid_w_segs; /* Num of pending tid write segments */
u32 alloc_w_segs; /* Number of segments for which write */
/* resources have been allocated for this QP */
/* For TID RDMA READ */ /* For TID RDMA READ */
u32 tid_r_reqs; /* Num of tid reads requested */ u32 tid_r_reqs; /* Num of tid reads requested */
...@@ -180,8 +188,12 @@ struct hfi1_qp_priv { ...@@ -180,8 +188,12 @@ struct hfi1_qp_priv {
u32 pending_tid_r_segs; /* Num of pending tid read segments */ u32 pending_tid_r_segs; /* Num of pending tid read segments */
u16 pkts_ps; /* packets per segment */ u16 pkts_ps; /* packets per segment */
u8 timeout_shift; /* account for number of packets per segment */ u8 timeout_shift; /* account for number of packets per segment */
u8 sync_pt; /* Set when QP reaches sync point */
}; };
#define HFI1_QP_WQE_INVALID ((u32)-1)
struct hfi1_swqe_priv { struct hfi1_swqe_priv {
struct tid_rdma_request tid_req; struct tid_rdma_request tid_req;
struct rvt_sge_state ss; /* Used for TID RDMA READ Request */ struct rvt_sge_state ss; /* Used for TID RDMA READ Request */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment