Commit 662c9b22 authored by Edwin Peer's avatar Edwin Peer Committed by Jakub Kicinski

bnxt_en: improve VF error messages when PF is unavailable

The current driver design relies on the PF netdev being open in order
to intercept the following HWRM commands from a VF:
    - HWRM_FUNC_VF_CFG
    - HWRM_CFA_L2_FILTER_ALLOC
    - HWRM_PORT_PHY_QCFG (only if FW_CAP_LINK_ADMIN is not supported)

If the PF is closed, then VFs are subjected to rather inscrutable error
messages in response to any configuration requests involving the above
command types. Recent firmware distinguishes this problem case from
other errors by returning HWRM_ERR_CODE_PF_UNAVAILABLE. In most cases,
the appropriate course of action is still to fail, but this can now be
accomplished with the aid of more informative, user-facing log messages.
For L2 filter allocations that are already asynchronous, an automatic
retry seems more appropriate.

v2: Delete extra newline.
Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8fa4219d
...@@ -8637,7 +8637,10 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) ...@@ -8637,7 +8637,10 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
/* Filter for default vnic 0 */ /* Filter for default vnic 0 */
rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
if (rc) { if (rc) {
netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); if (BNXT_VF(bp) && rc == -ENODEV)
netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
else
netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
goto err_out; goto err_out;
} }
vnic->uc_filter_count = 1; vnic->uc_filter_count = 1;
...@@ -9430,6 +9433,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) ...@@ -9430,6 +9433,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
rc = hwrm_req_send(bp, req); rc = hwrm_req_send(bp, req);
if (rc) { if (rc) {
hwrm_req_drop(bp, req); hwrm_req_drop(bp, req);
if (BNXT_VF(bp) && rc == -ENODEV) {
netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
rc = 0;
}
return rc; return rc;
} }
...@@ -10828,12 +10835,21 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) ...@@ -10828,12 +10835,21 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
if (rc) { if (rc) {
netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", if (BNXT_VF(bp) && rc == -ENODEV) {
rc); if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
else
netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
rc = 0;
} else {
netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
}
vnic->uc_filter_count = i; vnic->uc_filter_count = i;
return rc; return rc;
} }
} }
if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
skip_uc: skip_uc:
if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
...@@ -11398,6 +11414,11 @@ static void bnxt_timer(struct timer_list *t) ...@@ -11398,6 +11414,11 @@ static void bnxt_timer(struct timer_list *t)
} }
} }
if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
netif_carrier_ok(dev)) { netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
...@@ -13104,7 +13125,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) ...@@ -13104,7 +13125,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
rc = __bnxt_reserve_rings(bp); rc = __bnxt_reserve_rings(bp);
if (rc) if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n"); netdev_warn(bp->dev, "Unable to reserve tx rings\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (sh) if (sh)
...@@ -13113,7 +13134,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) ...@@ -13113,7 +13134,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
/* Rings may have been trimmed, re-reserve the trimmed rings. */ /* Rings may have been trimmed, re-reserve the trimmed rings. */
if (bnxt_need_reserve_rings(bp)) { if (bnxt_need_reserve_rings(bp)) {
rc = __bnxt_reserve_rings(bp); rc = __bnxt_reserve_rings(bp);
if (rc) if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n"); netdev_warn(bp->dev, "2nd rings reservation failed.\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
} }
...@@ -13139,7 +13160,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) ...@@ -13139,7 +13160,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
bnxt_clear_int_mode(bp); bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true); rc = bnxt_set_dflt_rings(bp, true);
if (rc) { if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n"); if (BNXT_VF(bp) && rc == -ENODEV)
netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
else
netdev_err(bp->dev, "Not enough rings available.\n");
goto init_dflt_ring_err; goto init_dflt_ring_err;
} }
rc = bnxt_init_int_mode(bp); rc = bnxt_init_int_mode(bp);
...@@ -13427,8 +13451,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -13427,8 +13451,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_ring_params(bp); bnxt_set_ring_params(bp);
rc = bnxt_set_dflt_rings(bp, true); rc = bnxt_set_dflt_rings(bp, true);
if (rc) { if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n"); if (BNXT_VF(bp) && rc == -ENODEV) {
rc = -ENOMEM; netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
} else {
netdev_err(bp->dev, "Not enough rings available.\n");
rc = -ENOMEM;
}
goto init_err_pci_clean; goto init_err_pci_clean;
} }
......
...@@ -1916,6 +1916,7 @@ struct bnxt { ...@@ -1916,6 +1916,7 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7 #define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8 #define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9 #define BNXT_STATE_NAPI_DISABLED 9
#define BNXT_STATE_L2_FILTER_RETRY 10
#define BNXT_STATE_FW_ACTIVATE 11 #define BNXT_STATE_FW_ACTIVATE 11
#define BNXT_STATE_RECOVER 12 #define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13 #define BNXT_STATE_FW_NON_FATAL_COND 13
......
...@@ -359,6 +359,8 @@ static int __hwrm_to_stderr(u32 hwrm_err) ...@@ -359,6 +359,8 @@ static int __hwrm_to_stderr(u32 hwrm_err)
return -EAGAIN; return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP; return -EOPNOTSUPP;
case HWRM_ERR_CODE_PF_UNAVAILABLE:
return -ENODEV;
default: default:
return -EIO; return -EIO;
} }
...@@ -648,7 +650,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) ...@@ -648,7 +650,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n", netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
req_type); req_type);
else if (rc) else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
req_type, token->seq_id, rc); req_type, token->seq_id, rc);
rc = __hwrm_to_stderr(rc); rc = __hwrm_to_stderr(rc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment