Commit b660bccb authored by Raju Rangoju, committed by Jakub Kicinski

cxgb4: enable interrupt based Tx completions for T5

Enable interrupt based Tx completions to improve latency for T5.
The consumer index (CIDX) will now come via interrupts so that Tx
SKBs can be freed up sooner in Rx path. Also, enforce CIDX flush
threshold override (CIDXFTHRESHO) to improve latency for slow
traffic. This ensures that the interrupt is generated immediately
whenever hardware catches up with driver (i.e. CIDX == PIDX is
reached), which is often the case for slow traffic.
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Link: https://lore.kernel.org/r/20210115102059.6846-1-rajur@chelsio.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c761b2df
...@@ -1600,7 +1600,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1600,7 +1600,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up. * has opened up.
*/ */
eth_txq_stop(q); eth_txq_stop(q);
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; if (chip_ver > CHELSIO_T5)
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
} }
wr = (void *)&q->q.desc[q->q.pidx]; wr = (void *)&q->q.desc[q->q.pidx];
...@@ -1832,6 +1833,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, ...@@ -1832,6 +1833,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct adapter *adapter; struct adapter *adapter;
int qidx, credits, ret; int qidx, credits, ret;
size_t fw_hdr_copy_len; size_t fw_hdr_copy_len;
unsigned int chip_ver;
u64 cntrl, *end; u64 cntrl, *end;
u32 wr_mid; u32 wr_mid;
...@@ -1896,6 +1898,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, ...@@ -1896,6 +1898,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
goto out_free; goto out_free;
} }
chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) { if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/* After we're done injecting the Work Request for this /* After we're done injecting the Work Request for this
...@@ -1907,7 +1910,8 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, ...@@ -1907,7 +1910,8 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up. * has opened up.
*/ */
eth_txq_stop(txq); eth_txq_stop(txq);
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; if (chip_ver > CHELSIO_T5)
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
} }
/* Start filling in our Work Request. Note that we do _not_ handle /* Start filling in our Work Request. Note that we do _not_ handle
...@@ -1960,7 +1964,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, ...@@ -1960,7 +1964,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/ */
cpl = (void *)(lso + 1); cpl = (void *)(lso + 1);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) if (chip_ver <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
...@@ -3598,6 +3602,25 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq, ...@@ -3598,6 +3602,25 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
} }
txq = &s->ethtxq[pi->first_qset + rspq->idx]; txq = &s->ethtxq[pi->first_qset + rspq->idx];
/* We've got the Hardware Consumer Index Update in the Egress Update
* message. These Egress Update messages will be our sole CIDX Updates
* we get since we don't want to chew up PCIe bandwidth for both Ingress
* Messages and Status Page writes. However, The code which manages
* reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
* stored in the Status Page at the end of the TX Queue. It's easiest
* to simply copy the CIDX Update value from the Egress Update message
* to the Status Page. Also note that no Endian issues need to be
* considered here since both are Big Endian and we're just copying
* bytes consistently ...
*/
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
struct cpl_sge_egr_update *egr;
egr = (struct cpl_sge_egr_update *)rsp;
WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
}
t4_sge_eth_txq_egress_update(adapter, txq, -1); t4_sge_eth_txq_egress_update(adapter, txq, -1);
} }
...@@ -4583,11 +4606,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, ...@@ -4583,11 +4606,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
* write the CIDX Updates into the Status Page at the end of the * write the CIDX Updates into the Status Page at the end of the
* TX Queue. * TX Queue.
*/ */
c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
FW_EQ_ETH_CMD_AUTOEQUIQE_F :
FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
FW_EQ_ETH_CMD_VIID_V(pi->viid)); FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid = c.fetchszm_to_iqid =
htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
HOSTFCMODE_INGRESS_QUEUE_X :
HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
...@@ -4598,6 +4625,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, ...@@ -4598,6 +4625,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
: FETCHBURSTMIN_64B_T6_X) | : FETCHBURSTMIN_64B_T6_X) |
FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries)); FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr); c.eqaddr = cpu_to_be64(txq->q.phys_addr);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment