Commit 4f1d9726 authored by Rahul Lakkireddy, committed by David S. Miller

cxgb4: improve credits recovery in TC-MQPRIO Tx path

Request a credit update whenever half of the available credits have been
consumed, counting the current request as well. Also, avoid retrying to
post packets when no credits are left. The credit update reply, delivered
via interrupt, will eventually restore the credits and invoke the Tx path
again.
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3430223d
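
To make the credit accounting described above concrete, here is a minimal user-space sketch of the mechanism. It is not driver code: every name in it (model_txq, model_post_wr, model_restore_credits, TOTAL_WR_CRED) is a hypothetical stand-in for the eosw_txq fields and adap->params.ofldq_wr_cred handled by the real Tx path.

/* Minimal user-space model of the credit accounting described above.
 * Hypothetical names throughout; not driver code.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define TOTAL_WR_CRED 128	/* stand-in for adap->params.ofldq_wr_cred */

struct model_txq {
	unsigned int cred;		/* credits currently available */
	unsigned int last_compl;	/* credits consumed since last credit-update request */
	unsigned int ncompl;		/* credit-update requests issued so far */
};

/* Try to post one work request costing @wrlen16 credits. */
static int model_post_wr(struct model_txq *q, unsigned int wrlen16)
{
	bool request_compl;

	/* No credits left: do not retry here; the credit update reply
	 * (an interrupt in the driver) restores credits and re-invokes
	 * the Tx path later.
	 */
	if (wrlen16 > q->cred)
		return -ENOMEM;

	/* Request a credit update once half the credits have been
	 * consumed, counting the current request as well.
	 */
	request_compl = !q->ncompl ||
			(q->last_compl + wrlen16) >= (TOTAL_WR_CRED / 2);
	if (request_compl) {
		q->ncompl++;
		q->last_compl = 0;
	}

	q->cred -= wrlen16;
	q->last_compl += wrlen16;
	printf("posted %u credits%s, %u credits left\n",
	       wrlen16, request_compl ? " (update requested)" : "", q->cred);
	return 0;
}

/* Credit update reply (delivered via interrupt in the driver). */
static void model_restore_credits(struct model_txq *q, unsigned int cred)
{
	q->cred += cred;
}

int main(void)
{
	struct model_txq q = { .cred = TOTAL_WR_CRED };
	unsigned int i;

	for (i = 0; i < 10; i++)
		if (model_post_wr(&q, 20))
			break;		/* out of credits: stop retrying */

	model_restore_credits(&q, 60);	/* pretend the reply arrived */
	model_post_wr(&q, 20);
	return 0;
}

Running this posts work requests of 20 credits each until the 128 credits run out, requests an update whenever the 64-credit halfway mark is crossed, bails out instead of retrying when credits are exhausted, and resumes once credits are restored.
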
@@ -2091,10 +2091,9 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
 	return flits + nsgl;
 }
 
-static inline void *write_eo_wr(struct adapter *adap,
-				struct sge_eosw_txq *eosw_txq,
-				struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
-				u32 hdr_len, u32 wrlen)
+static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
+			 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+			 u32 hdr_len, u32 wrlen)
 {
 	const struct skb_shared_info *ssi = skb_shinfo(skb);
 	struct cpl_tx_pkt_core *cpl;
@@ -2113,7 +2112,8 @@ static inline void *write_eo_wr(struct adapter *adap,
 		immd_len += hdr_len;
 
 	if (!eosw_txq->ncompl ||
-	    eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+	    (eosw_txq->last_compl + wrlen16) >=
+	    (adap->params.ofldq_wr_cred / 2)) {
 		compl = true;
 		eosw_txq->ncompl++;
 		eosw_txq->last_compl = 0;
@@ -2153,8 +2153,8 @@ static inline void *write_eo_wr(struct adapter *adap,
 	return cpl;
 }
 
-static void ethofld_hard_xmit(struct net_device *dev,
-			      struct sge_eosw_txq *eosw_txq)
+static int ethofld_hard_xmit(struct net_device *dev,
+			     struct sge_eosw_txq *eosw_txq)
 {
 	struct port_info *pi = netdev2pinfo(dev);
 	struct adapter *adap = netdev2adap(dev);
@@ -2167,8 +2167,8 @@ static void ethofld_hard_xmit(struct net_device *dev,
 	bool skip_eotx_wr = false;
 	struct tx_sw_desc *d;
 	struct sk_buff *skb;
+	int left, ret = 0;
 	u8 flits, ndesc;
-	int left;
 
 	eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
 	spin_lock(&eohw_txq->lock);
@@ -2198,11 +2198,19 @@ static void ethofld_hard_xmit(struct net_device *dev,
 	wrlen = flits * 8;
 	wrlen16 = DIV_ROUND_UP(wrlen, 16);
 
-	/* If there are no CPL credits, then wait for credits
-	 * to come back and retry again
+	left = txq_avail(&eohw_txq->q) - ndesc;
+
+	/* If there are no descriptors left in hardware queues or no
+	 * CPL credits left in software queues, then wait for them
+	 * to come back and retry again. Note that we always request
+	 * for credits update via interrupt for every half credits
+	 * consumed. So, the interrupt will eventually restore the
+	 * credits and invoke the Tx path again.
 	 */
-	if (unlikely(wrlen16 > eosw_txq->cred))
+	if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
+		ret = -ENOMEM;
 		goto out_unlock;
+	}
 
 	if (unlikely(skip_eotx_wr)) {
 		start = (u64 *)wr;
@@ -2231,7 +2239,8 @@ static void ethofld_hard_xmit(struct net_device *dev,
 	sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
 					  hdr_len);
 	if (data_len) {
-		if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+		ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
+		if (unlikely(ret)) {
 			memset(d->addr, 0, sizeof(d->addr));
 			eohw_txq->mapping_err++;
 			goto out_unlock;
@@ -2277,12 +2286,13 @@ static void ethofld_hard_xmit(struct net_device *dev,
 
 out_unlock:
 	spin_unlock(&eohw_txq->lock);
+	return ret;
 }
 
 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
 {
 	struct sk_buff *skb;
-	int pktcount;
+	int pktcount, ret;
 
 	switch (eosw_txq->state) {
 	case CXGB4_EO_STATE_ACTIVE:
@@ -2307,7 +2317,9 @@ static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
 			continue;
 		}
 
-		ethofld_hard_xmit(dev, eosw_txq);
+		ret = ethofld_hard_xmit(dev, eosw_txq);
+		if (ret)
+			break;
 	}
 }