Commit 747b931f authored by Kaike Wan, committed by Jason Gunthorpe

IB/hfi1: Implement CCA for TID RDMA protocol

Currently, FECN handling is not implemented on TID RDMA expected receive
packets and therefore CCA can't be turned on when TID RDMA is
enabled. This patch adds the CCA support to TID RDMA protocol by:

- modifying FECN RSM rule to include kernel receive contexts

- For TID_RDMA READ RESP or TID RDMA ACK packet, a CNP will be sent out if
  the FECN bit is set. For other TID RDMA packets that generate at least
  one response packet, the BECN bit will be set in the first response
  packet

- Copying expected packet data to destination buffer when FECN bit is set
  in the TID RDMA READ RESP or TID RDMA WRITE DATA packet. In this case,
  the expected packet is received as an eager packet

- Handling the TID sequence error for subsequent normal expected packets.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 8da0f0f2
...@@ -13297,15 +13297,18 @@ static int set_up_context_variables(struct hfi1_devdata *dd) ...@@ -13297,15 +13297,18 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/* /*
* The RMT entries are currently allocated as shown below: * The RMT entries are currently allocated as shown below:
* 1. QOS (0 to 128 entries); * 1. QOS (0 to 128 entries);
* 2. FECN for PSM (num_user_contexts + num_vnic_contexts); * 2. FECN (num_kernel_context - 1 + num_user_contexts +
* num_vnic_contexts);
* 3. VNIC (num_vnic_contexts). * 3. VNIC (num_vnic_contexts).
* It should be noted that PSM FECN oversubscribe num_vnic_contexts * It should be noted that FECN oversubscribe num_vnic_contexts
* entries of RMT because both VNIC and PSM could allocate any receive * entries of RMT because both VNIC and PSM could allocate any receive
* context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
* and PSM FECN must reserve an RMT entry for each possible PSM receive * and PSM FECN must reserve an RMT entry for each possible PSM receive
* context. * context.
*/ */
rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2); rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
if (HFI1_CAP_IS_KSET(TID_RDMA))
rmt_count += num_kernel_contexts - 1;
if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd, dd_dev_err(dd,
...@@ -14288,37 +14291,43 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) ...@@ -14288,37 +14291,43 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
} }
static void init_user_fecn_handling(struct hfi1_devdata *dd, static void init_fecn_handling(struct hfi1_devdata *dd,
struct rsm_map_table *rmt) struct rsm_map_table *rmt)
{ {
struct rsm_rule_data rrd; struct rsm_rule_data rrd;
u64 reg; u64 reg;
int i, idx, regoff, regidx; int i, idx, regoff, regidx, start;
u8 offset; u8 offset;
u32 total_cnt; u32 total_cnt;
if (HFI1_CAP_IS_KSET(TID_RDMA))
/* Exclude context 0 */
start = 1;
else
start = dd->first_dyn_alloc_ctxt;
total_cnt = dd->num_rcv_contexts - start;
/* there needs to be enough room in the map table */ /* there needs to be enough room in the map table */
total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
return; return;
} }
/* /*
* RSM will extract the destination context as an index into the * RSM will extract the destination context as an index into the
* map table. The destination contexts are a sequential block * map table. The destination contexts are a sequential block
* in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive). * in the range start...num_rcv_contexts-1 (inclusive).
* Map entries are accessed as offset + extracted value. Adjust * Map entries are accessed as offset + extracted value. Adjust
* the added offset so this sequence can be placed anywhere in * the added offset so this sequence can be placed anywhere in
* the table - as long as the entries themselves do not wrap. * the table - as long as the entries themselves do not wrap.
* There are only enough bits in offset for the table size, so * There are only enough bits in offset for the table size, so
* start with that to allow for a "negative" offset. * start with that to allow for a "negative" offset.
*/ */
offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used - offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
(int)dd->first_dyn_alloc_ctxt);
for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used; for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
i < dd->num_rcv_contexts; i++, idx++) { i++, idx++) {
/* replace with identity mapping */ /* replace with identity mapping */
regoff = (idx % 8) * 8; regoff = (idx % 8) * 8;
regidx = idx / 8; regidx = idx / 8;
...@@ -14440,7 +14449,7 @@ static void init_rxe(struct hfi1_devdata *dd) ...@@ -14440,7 +14449,7 @@ static void init_rxe(struct hfi1_devdata *dd)
rmt = alloc_rsm_map_table(dd); rmt = alloc_rsm_map_table(dd);
/* set up QOS, including the QPN map table */ /* set up QOS, including the QPN map table */
init_qos(dd, rmt); init_qos(dd, rmt);
init_user_fecn_handling(dd, rmt); init_fecn_handling(dd, rmt);
complete_rsm_map_table(dd, rmt); complete_rsm_map_table(dd, rmt);
/* record number of used rsm map entries for vnic */ /* record number of used rsm map entries for vnic */
dd->vnic.rmt_start = rmt->used; dd->vnic.rmt_start = rmt->used;
......
...@@ -516,7 +516,9 @@ bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, ...@@ -516,7 +516,9 @@ bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
*/ */
do_cnp = prescan || do_cnp = prescan ||
(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST && (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE); opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
opcode == TID_OP(READ_RESP) ||
opcode == TID_OP(ACK);
/* Call appropriate CNP handler */ /* Call appropriate CNP handler */
if (!ignore_fecn && do_cnp && fecn) if (!ignore_fecn && do_cnp && fecn)
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment