Commit 4f9264d1 authored by Kaike Wan, committed by Doug Ledford

IB/hfi1: Add an s_acked_ack_queue pointer

The s_ack_queue is managed by two pointers into the ring:
r_head_ack_queue and s_tail_ack_queue. r_head_ack_queue is the index where
the next received request will be placed and s_tail_ack_queue is the index
of the request currently being processed. This works
perfectly fine for normal Verbs as the requests are processed one at a
time and the s_tail_ack_queue is not moved until the request that it
points to is fully completed.

In this fashion, s_tail_ack_queue constantly chases r_head_ack_queue and
the two pointers can easily be used to determine "queue full" and "queue
empty" conditions.

The detection of these two conditions is important in determining when an
old entry can safely be overwritten with a newly received request and when
the resources associated with the old request can safely be released.

When pipelined TID RDMA WRITE is introduced into this mix, things look
very different. r_head_ack_queue is still the point at which a newly
received request will be inserted and s_tail_ack_queue still points to the
request currently being processed. However, with pipelined TID RDMA WRITE
requests, s_tail_ack_queue moves to the next request once all TID RDMA
WRITE responses for that request have been sent. The rest of the protocol
for a particular request is managed by other pointers specific to TID RDMA
- r_tid_tail and r_tid_ack - which point to the entry for which the next
TID RDMA DATA packets are expected to arrive and the entry for which the
next TID RDMA ACK packets are to be generated, respectively.

What this means is that entries in the ring which are "behind"
s_tail_ack_queue (entries that s_tail_ack_queue has gone past) can no
longer be considered complete. This is where the problem lies - a newly
received request could potentially overwrite a still-active TID RDMA WRITE
request.
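
Concretely, the pre-patch "is there room for this request" test in
hfi1_rc_rcv() (the check being changed in the hunks below) only consults
s_tail_ack_queue, shown here in slightly simplified form; an entry that
s_tail_ack_queue has already stepped past looks reusable even while its
TID RDMA WRITE traffic is still in flight:

  next = qp->r_head_ack_queue + 1;
  if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
      next = 0;
  if (unlikely(next == qp->s_tail_ack_queue)) {
      /* reclaims the oldest entry, which may still be in use by TID RDMA */
      if (!qp->s_ack_queue[next].sent)
          goto nack_inv_unlck;
      update_ack_queue(qp, next);
  }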

The reason why the TID RDMA pointers trail s_tail_ack_queue is that the
normal Verbs send engine uses s_tail_ack_queue as the pointer for the next
response. Since TID RDMA WRITE responses are processed by the normal Verbs
send engine, s_tail_ack_queue had to be moved to the next entry once all
TID RDMA WRITE response packets were sent to get the desired pipelining
between requests. Doing otherwise would mean that the normal Verbs send
engine would not be able to send the TID RDMA WRITE responses for the next
TID RDMA request until the current one is fully completed.

This patch introduces the s_acked_ack_queue index to point to the next
request to complete on the responder side. For requests other than TID
RDMA WRITE, s_acked_ack_queue should always be kept in sync with
s_tail_ack_queue. For TID RDMA WRITE requests, it may fall behind
s_tail_ack_queue.
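
With the new index in place, the same "is there room" test in hfi1_rc_rcv()
(see the hunks below) compares against s_acked_ack_queue instead, so an
entry that s_tail_ack_queue has moved past but that still has outstanding
TID RDMA WRITE work cannot be handed out again:

  if (unlikely(next == qp->s_acked_ack_queue)) {
      if (!qp->s_ack_queue[next].sent)
          goto nack_inv_unlck;
      update_ack_queue(qp, next);
  }

Correspondingly, make_rc_ack() only advances s_acked_ack_queue together
with s_tail_ack_queue when the entry being retired is not a TID RDMA WRITE
request, and update_ack_queue() keeps the two indices in sync.
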
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f5a4a95f
@@ -120,6 +120,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
     struct hfi1_qp_priv *priv = qp->priv;
     bool last_pkt;
     u32 delta;
+    u8 next = qp->s_tail_ack_queue;

     trace_hfi1_rsp_make_rc_ack(qp, 0);
     lockdep_assert_held(&qp->s_lock);

@@ -149,9 +150,17 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
          * response has been sent instead of only being
          * constructed.
          */
-        if (++qp->s_tail_ack_queue >
-            rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
-            qp->s_tail_ack_queue = 0;
+        if (++next > rvt_size_atomic(&dev->rdi))
+            next = 0;
+        /*
+         * Only advance the s_acked_ack_queue pointer if there
+         * have been no TID RDMA requests.
+         */
+        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+        if (e->opcode != TID_OP(WRITE_REQ) &&
+            qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+            qp->s_acked_ack_queue = next;
+        qp->s_tail_ack_queue = next;
         /* FALLTHROUGH */
     case OP(SEND_ONLY):
     case OP(ACKNOWLEDGE):

@@ -172,6 +181,10 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
              */
             len = e->rdma_sge.sge_length;
             if (len && !e->rdma_sge.mr) {
+                if (qp->s_acked_ack_queue ==
+                    qp->s_tail_ack_queue)
+                    qp->s_acked_ack_queue =
+                        qp->r_head_ack_queue;
                 qp->s_tail_ack_queue = qp->r_head_ack_queue;
                 goto bail;
             }

@@ -202,6 +215,10 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
              */
             len = e->rdma_sge.sge_length;
             if (len && !e->rdma_sge.mr) {
+                if (qp->s_acked_ack_queue ==
+                    qp->s_tail_ack_queue)
+                    qp->s_acked_ack_queue =
+                        qp->r_head_ack_queue;
                 qp->s_tail_ack_queue = qp->r_head_ack_queue;
                 goto bail;
             }

@@ -2235,6 +2252,8 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
         e->psn = psn;
         if (old_req)
             goto unlock_done;
+        if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+            qp->s_acked_ack_queue = prev;
         qp->s_tail_ack_queue = prev;
         break;
     }

@@ -2248,6 +2267,8 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
          */
         if (!e || e->opcode != (u8)opcode || old_req)
             goto unlock_done;
+        if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
+            qp->s_acked_ack_queue = prev;
         qp->s_tail_ack_queue = prev;
         break;
     }

@@ -2274,6 +2295,8 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
          * Resend the RDMA read or atomic op which
          * ACKs this duplicate request.
          */
+        if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
+            qp->s_acked_ack_queue = mra;
         qp->s_tail_ack_queue = mra;
         break;
     }

@@ -2646,7 +2669,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
         if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
             next = 0;
         spin_lock_irqsave(&qp->s_lock, flags);
-        if (unlikely(next == qp->s_tail_ack_queue)) {
+        if (unlikely(next == qp->s_acked_ack_queue)) {
             if (!qp->s_ack_queue[next].sent)
                 goto nack_inv_unlck;
             update_ack_queue(qp, next);

@@ -2723,7 +2746,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
         if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
             next = 0;
         spin_lock_irqsave(&qp->s_lock, flags);
-        if (unlikely(next == qp->s_tail_ack_queue)) {
+        if (unlikely(next == qp->s_acked_ack_queue)) {
             if (!qp->s_ack_queue[next].sent)
                 goto nack_inv_unlck;
             update_ack_queue(qp, next);

@@ -18,6 +18,7 @@ static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
     if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
         next = 0;
     qp->s_tail_ack_queue = next;
+    qp->s_acked_ack_queue = next;
     qp->s_ack_state = OP(ACKNOWLEDGE);
 }

@@ -2044,6 +2044,8 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet,
         goto unlock;
     }
     /* Re-process old requests.*/
+    if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
+        qp->s_acked_ack_queue = prev;
     qp->s_tail_ack_queue = prev;
     /*
      * Since the qp->s_tail_ack_queue is modified, the

@@ -40,7 +40,7 @@ u16 hfi1_trace_get_tid_idx(u32 ent);
 #define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
                      "r_psn 0x%x r_state 0x%x r_flags 0x%x " \
                      "r_head_ack_queue %u s_tail_ack_queue %u " \
-                     "s_ack_state 0x%x " \
+                     "s_acked_ack_queue %u s_ack_state 0x%x " \
                      "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \
                      "iow_flags 0x%lx"

@@ -62,7 +62,7 @@ u16 hfi1_trace_get_tid_idx(u32 ent);
                      "s_next_psn 0x%x"

 #define RCV_ERR_PRN  "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
-                     "s_tail_ack_queue %u " \
+                     "s_acked_ack_queue %u s_tail_ack_queue %u " \
                      "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \
                      " diff %d"

@@ -671,6 +671,7 @@ DECLARE_EVENT_CLASS(/* rsp_info */
         __field(u8, r_flags)
         __field(u8, r_head_ack_queue)
         __field(u8, s_tail_ack_queue)
+        __field(u8, s_acked_ack_queue)
         __field(u8, s_ack_state)
         __field(u8, s_nak_state)
         __field(u8, r_nak_state)

@@ -691,6 +692,7 @@ DECLARE_EVENT_CLASS(/* rsp_info */
         __entry->r_flags = qp->r_flags;
         __entry->r_head_ack_queue = qp->r_head_ack_queue;
         __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
+        __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
         __entry->s_ack_state = qp->s_ack_state;
         __entry->s_nak_state = qp->s_nak_state;
         __entry->s_flags = qp->s_flags;

@@ -709,6 +711,7 @@ DECLARE_EVENT_CLASS(/* rsp_info */
         __entry->r_flags,
         __entry->r_head_ack_queue,
         __entry->s_tail_ack_queue,
+        __entry->s_acked_ack_queue,
         __entry->s_ack_state,
         __entry->s_nak_state,
         __entry->s_flags,

@@ -1007,6 +1010,7 @@ DECLARE_EVENT_CLASS(/* rc_rcv_err */
         __field(u32, qpn)
         __field(u32, s_flags)
         __field(u8, state)
+        __field(u8, s_acked_ack_queue)
         __field(u8, s_tail_ack_queue)
         __field(u8, r_head_ack_queue)
         __field(u32, opcode)

@@ -1019,6 +1023,7 @@ DECLARE_EVENT_CLASS(/* rc_rcv_err */
         __entry->qpn = qp->ibqp.qp_num;
         __entry->s_flags = qp->s_flags;
         __entry->state = qp->state;
+        __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
         __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
         __entry->r_head_ack_queue = qp->r_head_ack_queue;
         __entry->opcode = opcode;

@@ -1032,6 +1037,7 @@ DECLARE_EVENT_CLASS(/* rc_rcv_err */
         __entry->qpn,
         __entry->s_flags,
         __entry->state,
+        __entry->s_acked_ack_queue,
         __entry->s_tail_ack_queue,
         __entry->r_head_ack_queue,
         __entry->opcode,

@@ -854,6 +854,7 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
     qp->s_mig_state = IB_MIG_MIGRATED;
     qp->r_head_ack_queue = 0;
     qp->s_tail_ack_queue = 0;
+    qp->s_acked_ack_queue = 0;
     qp->s_num_rd_atomic = 0;
     if (qp->r_rq.wq) {
         qp->r_rq.wq->head = 0;

@@ -375,6 +375,7 @@ struct rvt_qp {
     u8 s_rnr_retry;        /* requester RNR retry counter */
     u8 s_num_rd_atomic;    /* number of RDMA read/atomic pending */
     u8 s_tail_ack_queue;   /* index into s_ack_queue[] */
+    u8 s_acked_ack_queue;  /* index into s_ack_queue[] */
     struct rvt_sge_state s_ack_rdma_sge;
     struct timer_list s_timer;