Commit d31cd579 authored by Michael Chan's avatar Michael Chan Committed by David S. Miller

bnxt_en: Remap TC to hardware queues when configuring PFC.

Initially, the MQPRIO TCs are mapped 1:1 directly to the hardware
queues.  Some of these hardware queues are configured to be lossless.
When PFC is enabled on one or more TCs, we now need to remap the
TCs that have PFC enabled to the lossless hardware queues.

After remapping, we need to close and open the NIC for the new
mapping to take effect.  We also need to reprogram all ETS parameters.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2e8ef77e
...@@ -173,44 +173,59 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) ...@@ -173,44 +173,59 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
return 0; return 0;
} }
static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask) static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{ {
struct hwrm_queue_cfg_input req = {0}; unsigned long qmap = 0;
int i; int max = bp->max_tc;
int i, j, rc;
if (netif_running(bp->dev)) /* Assign lossless TCs first */
bnxt_tx_disable(bp); for (i = 0, j = 0; i < max; ) {
if (lltc_mask & (1 << i)) {
if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
bp->tc_to_qidx[i] = j;
__set_bit(j, &qmap);
i++;
}
j++;
continue;
}
i++;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1); for (i = 0, j = 0; i < max; i++) {
req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR); if (lltc_mask & (1 << i))
req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE); continue;
j = find_next_zero_bit(&qmap, max, j);
bp->tc_to_qidx[i] = j;
__set_bit(j, &qmap);
j++;
}
/* Configure lossless queues to lossy first */ if (netif_running(bp->dev)) {
req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY; bnxt_close_nic(bp, false, false);
for (i = 0; i < bp->max_tc; i++) { rc = bnxt_open_nic(bp, false, false);
if (BNXT_LLQ(bp->q_info[i].queue_profile)) { if (rc) {
req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
hwrm_send_message(bp, &req, sizeof(req), return rc;
HWRM_CMD_TIMEOUT);
bp->q_info[i].queue_profile =
QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
} }
} }
if (bp->ieee_ets) {
/* Now configure desired queues to lossless */ int tc = netdev_get_num_tc(bp->dev);
req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
for (i = 0; i < bp->max_tc; i++) { if (!tc)
if (lltc_mask & (1 << i)) { tc = 1;
req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
hwrm_send_message(bp, &req, sizeof(req), if (rc) {
HWRM_CMD_TIMEOUT); netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
bp->q_info[i].queue_profile = return rc;
QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; }
rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
if (rc) {
netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
return rc;
} }
} }
if (netif_running(bp->dev))
bnxt_tx_enable(bp);
return 0; return 0;
} }
...@@ -220,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) ...@@ -220,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
struct ieee_ets *my_ets = bp->ieee_ets; struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0; unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0; u8 i, pri, lltc_count = 0;
bool need_q_recfg = false; bool need_q_remap = false;
int rc; int rc;
if (!my_ets) if (!my_ets)
...@@ -240,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) ...@@ -240,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
if (lltc_count > bp->max_lltc) if (lltc_count > bp->max_lltc)
return -EINVAL; return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return rc;
for (i = 0; i < bp->max_tc; i++) { for (i = 0; i < bp->max_tc; i++) {
if (tc_mask & (1 << i)) { if (tc_mask & (1 << i)) {
if (!BNXT_LLQ(bp->q_info[i].queue_profile)) u8 qidx = bp->tc_to_qidx[i];
need_q_recfg = true;
if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
need_q_remap = true;
break;
}
} }
} }
if (need_q_recfg) if (need_q_remap)
rc = bnxt_hwrm_queue_cfg(bp, tc_mask); rc = bnxt_queue_remap(bp, tc_mask);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return rc;
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment