Commit 26dda7da authored by Nithin Dabilpuram, committed by David S. Miller

octeontx2-af: Restrict TL1 allocation and configuration

TL1 is the root node in the scheduling hierarchy and is a global
resource available only in limited numbers.

This patch introduces restrictions and validation on TL1 node
allocation so that the resource is shared effectively across
AF consumers.

- Limit TL1 allocation to two per lmac: one for the normal link and
  one for the IEEE 802.3br express link (Express Send DMA).
  Effectively, all VFs of an RVU PF (lmac) share these two TL1 schqs
  (see the encoding sketch below).
- TL1 cannot be freed once allocated.
- Allow VFs to apply only the default config to TL1, and only if it
  is not already applied. PFs can always overwrite the TL1 config.
- Consider NIX_AQ_INSTOP_WRITE while validating the txschq when
  sq.ena is set.
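For illustration, a minimal userspace sketch of the new pfvf_map
encoding: the owning PF/VF function sits in the low 16 bits and
per-schq flags in the high 16 bits. The TXSCH_MAP* macros below
mirror the ones this patch adds to rvu.h (BIT_ULL(0) is replaced by
a plain 32-bit constant here, and the pcifunc value 0x1234 is made
up for the example):

#include <stdint.h>
#include <stdio.h>

#define NIX_TXSCHQ_TL1_CFG_DONE  (1u << 0)  /* BIT_ULL(0) in the patch */
#define TXSCH_MAP_FUNC(m)        ((m) & 0xFFFF)
#define TXSCH_MAP_FLAGS(m)       ((m) >> 16)
#define TXSCH_MAP(f, fl)         (((f) & 0xFFFF) | ((fl) << 16))

int main(void)
{
        /* owner pcifunc in the low 16 bits, no flags yet */
        uint32_t map = TXSCH_MAP(0x1234, 0);

        /* mark the TL1 default config as applied, keeping the owner */
        map = TXSCH_MAP(TXSCH_MAP_FUNC(map),
                        TXSCH_MAP_FLAGS(map) | NIX_TXSCHQ_TL1_CFG_DONE);

        printf("func=0x%x flags=0x%x\n",
               TXSCH_MAP_FUNC(map), TXSCH_MAP_FLAGS(map));
        return 0;
}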
Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7ee74697
@@ -143,6 +143,9 @@ enum nix_scheduler {
         NIX_TXSCH_LVL_CNT = 0x5,
 };
 
+#define TXSCH_TL1_DFLT_RR_QTM           ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_PRIO          (0x1ull)
+
 /* Min/Max packet sizes, excluding FCS */
 #define NIC_HW_MIN_FRS                  40
 #define NIC_HW_MAX_FRS                  9212
...
@@ -156,7 +156,11 @@ struct rvu_pfvf {
 struct nix_txsch {
         struct rsrc_bmap schq;
         u8   lvl;
-       u16  *pfvf_map;
+#define NIX_TXSCHQ_TL1_CFG_DONE         BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map)      ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map)     ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags)      (((__func) & 0xFFFF) | ((__flags) << 16))
+       u32  *pfvf_map;
 };
 
 struct npc_pkind {
...
@@ -127,6 +127,7 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 {
         struct nix_txsch *txsch;
         struct nix_hw *nix_hw;
+       u16 map_func;
 
         nix_hw = get_nix_hw(rvu->hw, blkaddr);
         if (!nix_hw)
@@ -138,11 +139,18 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
                 return false;
 
         mutex_lock(&rvu->rsrc_lock);
-       if (txsch->pfvf_map[schq] != pcifunc) {
-               mutex_unlock(&rvu->rsrc_lock);
-               return false;
-       }
+       map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
         mutex_unlock(&rvu->rsrc_lock);
+
+       /* For TL1 schq, sharing across VF's of same PF is ok */
+       if (lvl == NIX_TXSCH_LVL_TL1 &&
+           rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+               return false;
+
+       if (lvl != NIX_TXSCH_LVL_TL1 &&
+           map_func != pcifunc)
+               return false;
+
         return true;
 }
@@ -494,7 +502,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 
         /* Check if SQ pointed SMQ belongs to this PF/VF or not */
         if (req->ctype == NIX_AQ_CTYPE_SQ &&
-           req->op != NIX_AQ_INSTOP_WRITE) {
+           ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+            (req->op == NIX_AQ_INSTOP_WRITE &&
+             req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
                 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
                                      pcifunc, req->sq.smq))
                         return NIX_AF_ERR_AQ_ENQUEUE;
@@ -987,6 +997,73 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
                             NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
 }
 
+static int
+rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
+                 u16 *schq_list, u16 *schq_cnt)
+{
+       struct nix_txsch *txsch;
+       struct nix_hw *nix_hw;
+       struct rvu_pfvf *pfvf;
+       u8 cgx_id, lmac_id;
+       u16 schq_base;
+       u32 *pfvf_map;
+       int pf, intf;
+
+       nix_hw = get_nix_hw(rvu->hw, blkaddr);
+       if (!nix_hw)
+               return -ENODEV;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+       pfvf_map = txsch->pfvf_map;
+       pf = rvu_get_pf(pcifunc);
+
+       /* static allocation as two TL1's per link */
+       intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+       switch (intf) {
+       case NIX_INTF_TYPE_CGX:
+               rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+               schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
+               break;
+       case NIX_INTF_TYPE_LBK:
+               schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       if (schq_base + 1 > txsch->schq.max)
+               return -ENODEV;
+
+       /* init pfvf_map as we store flags */
+       if (pfvf_map[schq_base] == U32_MAX) {
+               pfvf_map[schq_base] =
+                       TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+               pfvf_map[schq_base + 1] =
+                       TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+
+               /* Onetime reset for TL1 */
+               nix_reset_tx_linkcfg(rvu, blkaddr,
+                                    NIX_TXSCH_LVL_TL1, schq_base);
+               nix_reset_tx_shaping(rvu, blkaddr,
+                                    NIX_TXSCH_LVL_TL1, schq_base);
+
+               nix_reset_tx_linkcfg(rvu, blkaddr,
+                                    NIX_TXSCH_LVL_TL1, schq_base + 1);
+               nix_reset_tx_shaping(rvu, blkaddr,
+                                    NIX_TXSCH_LVL_TL1, schq_base + 1);
+       }
+
+       if (schq_list && schq_cnt) {
+               schq_list[0] = schq_base;
+               schq_list[1] = schq_base + 1;
+               *schq_cnt = 2;
+       }
+
+       return 0;
+}
+
 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
                                      struct nix_txsch_alloc_req *req,
                                      struct nix_txsch_alloc_rsp *rsp)
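Aside: rvu_get_tl1_schqs() above statically assigns two TL1 schqs per
link, at indices 2n (normal link) and 2n + 1 (express link). A
standalone sketch of that index computation, using assumed values for
MAX_LMAC_PER_CGX and the CGX count (both are hardware-specific; the
numbers below are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define MAX_LMAC_PER_CGX 4   /* assumption for illustration */
#define CGX_CNT_MAX      3   /* assumption for illustration */

/* Two TL1 schqs per link; LBK's pair sits after all CGX lmac pairs. */
static uint16_t tl1_schq_base(int cgx_id, int lmac_id, int is_lbk)
{
        if (is_lbk)
                return CGX_CNT_MAX * MAX_LMAC_PER_CGX * 2;
        return (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
}

int main(void)
{
        /* e.g. cgx 1, lmac 2 -> schqs 12 and 13 */
        printf("%u\n", tl1_schq_base(1, 2, 0));
        return 0;
}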
@@ -997,6 +1074,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
         struct rvu_pfvf *pfvf;
         struct nix_hw *nix_hw;
         int blkaddr, rc = 0;
+       u32 *pfvf_map;
         u16 schq;
 
         pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1012,13 +1090,23 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                 txsch = &nix_hw->txsch[lvl];
                 req_schq = req->schq_contig[lvl] + req->schq[lvl];
+               pfvf_map = txsch->pfvf_map;
+
+               if (!req_schq)
+                       continue;
 
                 /* There are only 28 TL1s */
-               if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
-                       goto err;
+               if (lvl == NIX_TXSCH_LVL_TL1) {
+                       if (req->schq_contig[lvl] ||
+                           req->schq[lvl] > 2 ||
+                           rvu_get_tl1_schqs(rvu, blkaddr,
+                                             pcifunc, NULL, NULL))
+                               goto err;
+                       continue;
+               }
 
                 /* Check if request is valid */
-               if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+               if (req_schq > MAX_TXSCHQ_PER_FUNC)
                         goto err;
 
                 /* If contiguous queues are needed, check for availability */
@@ -1034,16 +1122,32 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
                 txsch = &nix_hw->txsch[lvl];
                 rsp->schq_contig[lvl] = req->schq_contig[lvl];
+               pfvf_map = txsch->pfvf_map;
                 rsp->schq[lvl] = req->schq[lvl];
 
-               schq = 0;
+               if (!req->schq[lvl] && !req->schq_contig[lvl])
+                       continue;
+
+               /* Handle TL1 specially as it is
+                * allocation is restricted to 2 TL1's
+                * per link
+                */
+               if (lvl == NIX_TXSCH_LVL_TL1) {
+                       rsp->schq_contig[lvl] = 0;
+                       rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
+                                         &rsp->schq_list[lvl][0],
+                                         &rsp->schq[lvl]);
+                       continue;
+               }
+
                 /* Alloc contiguous queues first */
                 if (req->schq_contig[lvl]) {
                         schq = rvu_alloc_rsrc_contig(&txsch->schq,
                                                      req->schq_contig[lvl]);
 
                         for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
-                               txsch->pfvf_map[schq] = pcifunc;
+                               pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
                                 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                                 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
                                 rsp->schq_contig_list[lvl][idx] = schq;
@@ -1054,7 +1158,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
                 /* Alloc non-contiguous queues */
                 for (idx = 0; idx < req->schq[lvl]; idx++) {
                         schq = rvu_alloc_rsrc(&txsch->schq);
-                       txsch->pfvf_map[schq] = pcifunc;
+                       pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
                         nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                         nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
                         rsp->schq_list[lvl][idx] = schq;
@@ -1096,7 +1200,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
                 txsch = &nix_hw->txsch[lvl];
                 for (schq = 0; schq < txsch->schq.max; schq++) {
-                       if (txsch->pfvf_map[schq] != pcifunc)
+                       if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                                 continue;
                         nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                 }
@@ -1105,7 +1209,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
         /* Flush SMQs */
         txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
         for (schq = 0; schq < txsch->schq.max; schq++) {
-               if (txsch->pfvf_map[schq] != pcifunc)
+               if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                         continue;
                 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
                 /* Do SMQ flush and set enqueue xoff */
@@ -1123,9 +1227,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
         /* Now free scheduler queues to free pool */
         for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+               /* Free all SCHQ's except TL1 as
+                * TL1 is shared across all VF's for a RVU PF
+                */
+               if (lvl == NIX_TXSCH_LVL_TL1)
+                       continue;
+
                 txsch = &nix_hw->txsch[lvl];
                 for (schq = 0; schq < txsch->schq.max; schq++) {
-                       if (txsch->pfvf_map[schq] != pcifunc)
+                       if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                                 continue;
                         rvu_free_rsrc(&txsch->schq, schq);
                         txsch->pfvf_map[schq] = 0;
@@ -1187,16 +1297,73 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
         return true;
 }
 
+static int
+nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+{
+       u16 schq_list[2], schq_cnt, schq;
+       int blkaddr, idx, err = 0;
+       u16 map_func, map_flags;
+       struct nix_hw *nix_hw;
+       u64 reg, regval;
+       u32 *pfvf_map;
+
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+       if (blkaddr < 0)
+               return NIX_AF_ERR_AF_LF_INVALID;
+
+       nix_hw = get_nix_hw(rvu->hw, blkaddr);
+       if (!nix_hw)
+               return -EINVAL;
+
+       pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+
+       mutex_lock(&rvu->rsrc_lock);
+
+       err = rvu_get_tl1_schqs(rvu, blkaddr,
+                               pcifunc, schq_list, &schq_cnt);
+       if (err)
+               goto unlock;
+
+       for (idx = 0; idx < schq_cnt; idx++) {
+               schq = schq_list[idx];
+               map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+               map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+               /* check if config is already done or this is pf */
+               if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
+                       continue;
+
+               /* default configuration */
+               reg = NIX_AF_TL1X_TOPOLOGY(schq);
+               regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+               rvu_write64(rvu, blkaddr, reg, regval);
+               reg = NIX_AF_TL1X_SCHEDULE(schq);
+               regval = TXSCH_TL1_DFLT_RR_QTM;
+               rvu_write64(rvu, blkaddr, reg, regval);
+               reg = NIX_AF_TL1X_CIR(schq);
+               regval = 0;
+               rvu_write64(rvu, blkaddr, reg, regval);
+
+               map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+               pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+       }
+
+unlock:
+       mutex_unlock(&rvu->rsrc_lock);
+       return err;
+}
+
 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                                     struct nix_txschq_config *req,
                                     struct msg_rsp *rsp)
 {
+       u16 schq, pcifunc = req->hdr.pcifunc;
         struct rvu_hwinfo *hw = rvu->hw;
-       u16 pcifunc = req->hdr.pcifunc;
         u64 reg, regval, schq_regbase;
         struct nix_txsch *txsch;
+       u16 map_func, map_flags;
         struct nix_hw *nix_hw;
         int blkaddr, idx, err;
+       u32 *pfvf_map;
         int nixlf;
 
         if (req->lvl >= NIX_TXSCH_LVL_CNT ||
@@ -1216,6 +1383,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                 return NIX_AF_ERR_AF_LF_INVALID;
 
         txsch = &nix_hw->txsch[req->lvl];
+       pfvf_map = txsch->pfvf_map;
+
+       /* VF is only allowed to trigger
+        * setting default cfg on TL1
+        */
+       if (pcifunc & RVU_PFVF_FUNC_MASK &&
+           req->lvl == NIX_TXSCH_LVL_TL1) {
+               return nix_tl1_default_cfg(rvu, pcifunc);
+       }
+
         for (idx = 0; idx < req->num_regs; idx++) {
                 reg = req->reg[idx];
                 regval = req->regval[idx];
@@ -1233,6 +1410,21 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                         regval |= ((u64)nixlf << 24);
                 }
 
+               /* Mark config as done for TL1 by PF */
+               if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+                   schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+                       schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+                       mutex_lock(&rvu->rsrc_lock);
+
+                       map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+                       map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+                       map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+                       pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+                       mutex_unlock(&rvu->rsrc_lock);
+               }
+
                 rvu_write64(rvu, blkaddr, reg, regval);
 
                 /* Check for SMQ flush, if so, poll for its completion */
@@ -1559,9 +1751,10 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
                  * PF/VF pcifunc mapping info.
                  */
                 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
-                                              sizeof(u16), GFP_KERNEL);
+                                              sizeof(u32), GFP_KERNEL);
                 if (!txsch->pfvf_map)
                         return -ENOMEM;
+               memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
         }
         return 0;
 }
@@ -2020,7 +2213,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
         txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
         mutex_lock(&rvu->rsrc_lock);
         for (schq = 0; schq < txsch->schq.max; schq++) {
-               if (txsch->pfvf_map[schq] != pcifunc)
+               if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                         continue;
                 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
                 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
...