Commit a3280efd authored by David S. Miller's avatar David S. Miller

Merge branch 'octeon-drr-config'

Sunil Goutham says:

====================
cn10k: DWRR MTU and weights configuration

On OcteonTx2 DWRR quantum is directly configured into each of
the transmit scheduler queues. And PF/VF drivers were free to
config any value up to 2^24.

On CN10K, HW is modified, the quantum configuration at scheduler
queues is in terms of weight. And SW needs to setup a base DWRR MTU
at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
'DWRR MTU * weight' to get the quantum.

This patch series addresses this HW change on CN10K silicons,
both admin function and PF/VF drivers are modified.

Also added support to program DWRR MTU via devlink params.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents cfba3fb6 c39830a4
...@@ -146,10 +146,7 @@ enum nix_scheduler { ...@@ -146,10 +146,7 @@ enum nix_scheduler {
#define TXSCH_RR_QTM_MAX ((1 << 24) - 1) #define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX #define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull) #define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
#define MAX_SCHED_WEIGHT 0xFF #define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */
#define DFLT_RR_WEIGHT 71
#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \
/ MAX_SCHED_WEIGHT)
/* Min/Max packet sizes, excluding FCS */ /* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40 #define NIC_HW_MIN_FRS 40
......
...@@ -1032,8 +1032,12 @@ struct nix_bp_cfg_rsp { ...@@ -1032,8 +1032,12 @@ struct nix_bp_cfg_rsp {
struct nix_hw_info { struct nix_hw_info {
struct mbox_msghdr hdr; struct mbox_msghdr hdr;
u16 rsvs16;
u16 max_mtu; u16 max_mtu;
u16 min_mtu; u16 min_mtu;
u32 rpm_dwrr_mtu;
u32 sdp_dwrr_mtu;
u64 rsvd[16]; /* Add reserved fields for future expansion */
}; };
struct nix_bandprof_alloc_req { struct nix_bandprof_alloc_req {
......
...@@ -329,6 +329,7 @@ struct hw_cap { ...@@ -329,6 +329,7 @@ struct hw_cap {
bool nix_shaping; /* Is shaping and coloring supported */ bool nix_shaping; /* Is shaping and coloring supported */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */ bool nix_rx_multicast; /* Rx packet replication support */
bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */ bool programmable_chans; /* Channels programmable ? */
bool ipolicer; bool ipolicer;
...@@ -706,6 +707,8 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, ...@@ -706,6 +707,8 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_cn10k_aq_enq_rsp *aq_rsp, struct nix_cn10k_aq_enq_rsp *aq_rsp,
u16 pcifunc, u8 ctype, u32 qidx); u16 pcifunc, u8 ctype, u32 qidx);
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */ /* NPC APIs */
int rvu_npc_init(struct rvu *rvu); int rvu_npc_init(struct rvu *rvu);
......
...@@ -1364,6 +1364,89 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) ...@@ -1364,6 +1364,89 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_nix_health_reporters_destroy(rvu_dl); rvu_nix_health_reporters_destroy(rvu_dl);
} }
/* Devlink Params APIs */
/* Devlink Params APIs */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u32 dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	/* Valid DWRR MTUs are powers of 2 up to 64K bytes, plus the two
	 * jumbo sizes (9728 and 10240) that HW encodes specially. Note
	 * that 0 is rejected by is_power_of_2().
	 */
	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 1,2,4,8,16,32,64....4K,8K,16K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	/* Changing the base DWRR MTU would retroactively alter the quantum
	 * of every configured scheduler queue, so only allow it while no
	 * NIXLF (i.e. no SMQ) is in use.
	 */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		/* A second NL_SET_ERR_MSG_MOD() call would overwrite the
		 * first, so report one combined message.
		 */
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs; make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	/* Translate the byte value into the HW register encoding and
	 * program the common RPM DWRR MTU. The value has already been
	 * checked by the .validate callback.
	 */
	rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU,
		    convert_bytes_to_dwrr_mtu(ctx->val.vu32));

	return 0;
}
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 regval;

	/* A common DWRR MTU only exists on CN10K and later silicons */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	/* Report the programmed register encoding back in bytes */
	regval = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(regval);

	return 0;
}
/* Driver-specific devlink parameter IDs; start past the generic ID
 * space so they can never collide with DEVLINK_PARAM_GENERIC_ID_*.
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
};

/* Devlink params exposed by the AF driver. "dwrr_mtu" is the base
 * DWRR MTU (in bytes) used for scheduler quantum computation; it is
 * runtime-configurable only (no driverinit/permanent cmode).
 */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
};
/* Devlink switch mode */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{ {
struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu_devlink *rvu_dl = devlink_priv(devlink);
...@@ -1438,7 +1521,30 @@ int rvu_register_dl(struct rvu *rvu) ...@@ -1438,7 +1521,30 @@ int rvu_register_dl(struct rvu *rvu)
rvu_dl->rvu = rvu; rvu_dl->rvu = rvu;
rvu->rvu_dl = rvu_dl; rvu->rvu_dl = rvu_dl;
return rvu_health_reporters_create(rvu); err = rvu_health_reporters_create(rvu);
if (err) {
dev_err(rvu->dev,
"devlink health reporter creation failed with error %d\n", err);
goto err_dl_health;
}
err = devlink_params_register(dl, rvu_af_dl_params,
ARRAY_SIZE(rvu_af_dl_params));
if (err) {
dev_err(rvu->dev,
"devlink params register failed with error %d", err);
goto err_dl_health;
}
devlink_params_publish(dl);
return 0;
err_dl_health:
rvu_health_reporters_destroy(rvu);
devlink_unregister(dl);
devlink_free(dl);
return err;
} }
void rvu_unregister_dl(struct rvu *rvu) void rvu_unregister_dl(struct rvu *rvu)
...@@ -1449,6 +1555,8 @@ void rvu_unregister_dl(struct rvu *rvu) ...@@ -1449,6 +1555,8 @@ void rvu_unregister_dl(struct rvu *rvu)
if (!dl) if (!dl)
return; return;
devlink_params_unregister(dl, rvu_af_dl_params,
ARRAY_SIZE(rvu_af_dl_params));
rvu_health_reporters_destroy(rvu); rvu_health_reporters_destroy(rvu);
devlink_unregister(dl); devlink_unregister(dl);
devlink_free(dl); devlink_free(dl);
......
...@@ -192,6 +192,47 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) ...@@ -192,6 +192,47 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
return NULL; return NULL;
} }
/* Convert a NIX_AF_DWRR_*_MTU register encoding to a byte count.
 *
 * The 5-bit HW field encodes the DWRR base MTU as a power of 2 up to
 * 64K bytes, except for two reserved codes:
 *   4 -> 9728 bytes
 *   5 -> 10240 bytes
 */
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	/* Only the low 5 bits of the register field are meaningful */
	dwrr_mtu &= 0x1FULL;

	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}
	/* Unreachable: every switch path returns, so the dead
	 * 'return 0;' that used to follow has been removed.
	 */
}
/* Convert a DWRR MTU in bytes to the NIX_AF_DWRR_*_MTU register
 * encoding (inverse of convert_dwrr_mtu_to_bytes()).
 *
 * Returns 0 for sizes that cannot be encoded; callers are expected to
 * have validated 'bytes' (power of 2 <= 64K, or 9728/10240).
 */
u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* Guard: ilog2(0) is undefined, and sizes above 64K have no
	 * register encoding.
	 */
	if (!bytes || bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:	/* reserved jumbo encoding */
		return 4;
	case 10240:	/* reserved jumbo encoding */
		return 5;
	default:
		return ilog2(bytes);
	}
	/* Unreachable: every switch path returns */
}
static void nix_rx_sync(struct rvu *rvu, int blkaddr) static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{ {
int err; int err;
...@@ -1958,8 +1999,17 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, ...@@ -1958,8 +1999,17 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
return; return;
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
(TXSCH_TL1_DFLT_RR_PRIO << 1)); (TXSCH_TL1_DFLT_RR_PRIO << 1));
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
TXSCH_TL1_DFLT_RR_QTM); /* On OcteonTx2 the config was in bytes and newer silcons
* it's changed to weight.
*/
if (!rvu->hw->cap.nix_common_dwrr_mtu)
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
TXSCH_TL1_DFLT_RR_QTM);
else
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
CN10K_MAX_DWRR_WEIGHT);
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
} }
...@@ -2667,6 +2717,15 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) ...@@ -2667,6 +2717,15 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
for (schq = 0; schq < txsch->schq.max; schq++) for (schq = 0; schq < txsch->schq.max; schq++)
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
} }
/* Setup a default value of 8192 as DWRR MTU */
if (rvu->hw->cap.nix_common_dwrr_mtu) {
rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
convert_bytes_to_dwrr_mtu(8192));
rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
convert_bytes_to_dwrr_mtu(8192));
}
return 0; return 0;
} }
...@@ -2743,6 +2802,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, ...@@ -2743,6 +2802,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
struct nix_hw_info *rsp) struct nix_hw_info *rsp)
{ {
u16 pcifunc = req->hdr.pcifunc; u16 pcifunc = req->hdr.pcifunc;
u64 dwrr_mtu;
int blkaddr; int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
...@@ -2755,6 +2815,20 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, ...@@ -2755,6 +2815,20 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
rsp->min_mtu = NIC_HW_MIN_FRS; rsp->min_mtu = NIC_HW_MIN_FRS;
if (!rvu->hw->cap.nix_common_dwrr_mtu) {
/* Return '1' on OTx2 */
rsp->rpm_dwrr_mtu = 1;
rsp->sdp_dwrr_mtu = 1;
return 0;
}
dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
return 0; return 0;
} }
...@@ -3647,6 +3721,28 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) ...@@ -3647,6 +3721,28 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
return 0; return 0;
} }
static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 consts;

	consts = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* On OcteonTx2 the DWRR quantum is programmed directly into each
	 * transmit scheduler queue and PF/VF drivers may configure any
	 * value up to 2^24. On CN10K the scheduler queues are instead
	 * configured with a weight, and SW must set a base DWRR MTU via
	 * NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU; HW derives the
	 * quantum as 'DWRR MTU * weight'.
	 *
	 * Bit 60 of NIX_AF_CONST1 reports whether HW uses such a common
	 * DWRR MTU; it reads as '0' on OcteonTx2.
	 */
	if ((consts >> 60) & 0x1)
		hw->cap.nix_common_dwrr_mtu = true;
}
static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{ {
const struct npc_lt_def_cfg *ltdefs; const struct npc_lt_def_cfg *ltdefs;
...@@ -3684,6 +3780,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) ...@@ -3684,6 +3780,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err) if (err)
return err; return err;
/* Setup capabilities of the NIX block */
rvu_nix_setup_capabilities(rvu, blkaddr);
/* Initialize admin queue */ /* Initialize admin queue */
err = nix_aq_init(rvu, block); err = nix_aq_init(rvu, block);
if (err) if (err)
......
...@@ -269,6 +269,8 @@ ...@@ -269,6 +269,8 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) #define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) #define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS (0x750) #define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
#define NIX_AF_DWRR_SDP_MTU (0x790)
#define NIX_AF_DWRR_RPM_MTU (0x7A0)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800) #define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810) #define NIX_AF_PSE_SHAPER_CFG (0x810)
#define NIX_AF_TX_EXPR_CREDIT (0x830) #define NIX_AF_TX_EXPR_CREDIT (0x830)
......
...@@ -92,8 +92,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) ...@@ -92,8 +92,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1; aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */ /* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
/* FIXME: set based on NIX_AF_DWRR_RPM_MTU*/ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
aq->sq.smq_rr_weight = pfvf->netdev->mtu;
aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura; aq->sq.sqb_aura = sqb_aura;
......
...@@ -9,6 +9,20 @@ ...@@ -9,6 +9,20 @@
#include "otx2_common.h" #include "otx2_common.h"
/* Compute the DWRR weight for a given MTU: ceil(mtu / base DWRR MTU).
 * On OTx2 the AF reports a DWRR MTU of '1' (see otx2_get_max_mtu()),
 * so the weight degenerates to 'mtu' and the same formula works on
 * both silicon families.
 * NOTE(review): assumes hw.dwrr_mtu is non-zero — the caller that
 * fills it clamps the value to at least 1; confirm before reuse.
 */
static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
{
	u32 quantum = pfvf->hw.dwrr_mtu;
	u32 weight = mtu / quantum;

	/* Round up when the MTU is not an exact multiple of the base */
	if (mtu % quantum)
		weight++;

	return weight;
}
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
......
...@@ -596,6 +596,9 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) ...@@ -596,6 +596,9 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
struct otx2_hw *hw = &pfvf->hw; struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req; struct nix_txschq_config *req;
u64 schq, parent; u64 schq, parent;
u64 dwrr_val;
dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req) if (!req)
...@@ -621,21 +624,21 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) ...@@ -621,21 +624,21 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++; req->num_regs++;
/* Set DWRR quantum */ /* Set DWRR quantum */
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = DFLT_RR_QTM; req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) { } else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0]; parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
req->reg[0] = NIX_AF_TL4X_PARENT(schq); req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16; req->regval[0] = parent << 16;
req->num_regs++; req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = DFLT_RR_QTM; req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) { } else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0]; parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
req->reg[0] = NIX_AF_TL3X_PARENT(schq); req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16; req->regval[0] = parent << 16;
req->num_regs++; req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = DFLT_RR_QTM; req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL2) { } else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0]; parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
req->reg[0] = NIX_AF_TL2X_PARENT(schq); req->reg[0] = NIX_AF_TL2X_PARENT(schq);
...@@ -643,7 +646,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) ...@@ -643,7 +646,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->num_regs++; req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM; req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
req->num_regs++; req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
...@@ -656,7 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) ...@@ -656,7 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
* For VF this is always ignored. * For VF this is always ignored.
*/ */
/* Set DWRR quantum */ /* On CN10K, if RR_WEIGHT is greater than 16384, HW will
* clip it to 16384, so configuring a 24bit max value
* will work on both OTx2 and CN10K.
*/
req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
...@@ -803,7 +809,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) ...@@ -803,7 +809,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1; aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */ /* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
aq->sq.smq_rr_quantum = DFLT_RR_QTM; aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura; aq->sq.sqb_aura = sqb_aura;
...@@ -1668,6 +1674,11 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf) ...@@ -1668,6 +1674,11 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
* SMQ errors * SMQ errors
*/ */
max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
/* Also save DWRR MTU, needed for DWRR weight calculation */
pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu;
if (!pfvf->hw.dwrr_mtu)
pfvf->hw.dwrr_mtu = 1;
} }
out: out:
......
...@@ -181,6 +181,7 @@ struct otx2_hw { ...@@ -181,6 +181,7 @@ struct otx2_hw {
/* NIX */ /* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer; u16 matchall_ipolicer;
u32 dwrr_mtu;
/* HW settings, coalescing etc */ /* HW settings, coalescing etc */
u16 rx_chan_base; u16 rx_chan_base;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment