Commit 7c380918 authored by Michael Chan, committed by David S. Miller

bnxt_en: Refactor bnxt_init_one() and turn on TPA support on 57500 chips.

With the new TPA feature in the 57500 chips, we need to discover the
feature first before setting up the netdev features.  Refactor the
firmware probe and init logic more cleanly into 2 functions and
make these calls before setting up the netdev features.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 78e7b866
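The ordering described in the commit message can be summarized with a minimal sketch before reading the diff. This is not code from the commit: the wrapper name probe_order_sketch() and the NETIF_F_GRO_HW decision are assumptions for illustration, while bnxt_fw_init_one_p1()/p2() and BNXT_SUPPORTS_TPA() are the helpers and macro shown in the diff below. The point is simply that both firmware init phases complete before any capability-dependent netdev feature is derived.

/* Illustrative sketch only -- not driver code from this commit.
 * Intent: finish firmware probe/init first, then derive netdev features
 * from the capabilities that were discovered (e.g. TPA on 57500 chips).
 */
static int probe_order_sketch(struct bnxt *bp, struct net_device *dev)
{
        int rc;

        rc = bnxt_fw_init_one_p1(bp);   /* ver_get, short cmd setup, func reset */
        if (rc)
                return rc;

        rc = bnxt_fw_init_one_p2(bp);   /* qcaps, vnic_qcaps, drv_rgtr, ... */
        if (rc)
                return rc;

        /* Only now is TPA support known; enabling hardware GRO here is an
         * assumed example of a capability-dependent feature decision.
         */
        if (BNXT_SUPPORTS_TPA(bp))
                dev->hw_features |= NETIF_F_GRO_HW;

        return 0;
}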
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9896,6 +9896,68 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
 }
 
+static int bnxt_fw_init_one_p1(struct bnxt *bp)
+{
+        int rc;
+
+        bp->fw_cap = 0;
+        rc = bnxt_hwrm_ver_get(bp);
+        if (rc)
+                return rc;
+
+        if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
+                rc = bnxt_alloc_kong_hwrm_resources(bp);
+                if (rc)
+                        bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
+        }
+
+        if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+            bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
+                rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+                if (rc)
+                        return rc;
+        }
+
+        rc = bnxt_hwrm_func_reset(bp);
+        if (rc)
+                return -ENODEV;
+
+        bnxt_hwrm_fw_set_time(bp);
+        return 0;
+}
+
+static int bnxt_fw_init_one_p2(struct bnxt *bp)
+{
+        int rc;
+
+        /* Get the MAX capabilities for this function */
+        rc = bnxt_hwrm_func_qcaps(bp);
+        if (rc) {
+                netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+                           rc);
+                return -ENODEV;
+        }
+
+        rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
+        if (rc)
+                netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
+                            rc);
+
+        rc = bnxt_hwrm_func_drv_rgtr(bp);
+        if (rc)
+                return -ENODEV;
+
+        rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+        if (rc)
+                return -ENODEV;
+
+        bnxt_hwrm_func_qcfg(bp);
+        bnxt_hwrm_vnic_qcaps(bp);
+        bnxt_hwrm_port_led_qcaps(bp);
+        bnxt_ethtool_init(bp);
+        bnxt_dcb_init(bp);
+        return 0;
+}
+
 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
         int rc;
@@ -10851,32 +10913,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 goto init_err_pci_clean;
 
         mutex_init(&bp->hwrm_cmd_lock);
-        rc = bnxt_hwrm_ver_get(bp);
-        if (rc)
-                goto init_err_pci_clean;
 
-        if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
-                rc = bnxt_alloc_kong_hwrm_resources(bp);
-                if (rc)
-                        bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
-        }
-
-        if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
-            bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
-                rc = bnxt_alloc_hwrm_short_cmd_req(bp);
-                if (rc)
-                        goto init_err_pci_clean;
-        }
+        rc = bnxt_fw_init_one_p1(bp);
+        if (rc)
+                goto init_err_pci_clean;
 
         if (BNXT_CHIP_P5(bp))
                 bp->flags |= BNXT_FLAG_CHIP_P5;
 
-        rc = bnxt_hwrm_func_reset(bp);
+        rc = bnxt_fw_init_one_p2(bp);
         if (rc)
                 goto init_err_pci_clean;
 
-        bnxt_hwrm_fw_set_time(bp);
-
         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
                            NETIF_F_TSO | NETIF_F_TSO6 |
                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
@@ -10920,37 +10968,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         if (!BNXT_CHIP_P4_PLUS(bp))
                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
 
-        rc = bnxt_hwrm_func_drv_rgtr(bp);
-        if (rc)
-                goto init_err_pci_clean;
-
-        rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
-        if (rc)
-                goto init_err_pci_clean;
-
         bp->ulp_probe = bnxt_ulp_probe;
 
-        rc = bnxt_hwrm_queue_qportcfg(bp);
-        if (rc) {
-                netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
-                           rc);
-                rc = -1;
-                goto init_err_pci_clean;
-        }
-
-        /* Get the MAX capabilities for this function */
-        rc = bnxt_hwrm_func_qcaps(bp);
-        if (rc) {
-                netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
-                           rc);
-                rc = -1;
-                goto init_err_pci_clean;
-        }
-
-        rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
-        if (rc)
-                netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
-                            rc);
-
         rc = bnxt_init_mac_addr(bp);
         if (rc) {
                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -10964,11 +10983,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 if (rc)
                         goto init_err_pci_clean;
         }
 
-        bnxt_hwrm_func_qcfg(bp);
-        bnxt_hwrm_vnic_qcaps(bp);
-        bnxt_hwrm_port_led_qcaps(bp);
-        bnxt_ethtool_init(bp);
-        bnxt_dcb_init(bp);
-
         /* MTU range: 60 - FW defined max */
         dev->min_mtu = ETH_ZLEN;
 
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1459,8 +1459,8 @@ struct bnxt {
 #define BNXT_CHIP_TYPE_NITRO_A0(bp)     ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
 #define BNXT_RX_PAGE_MODE(bp)   ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
 #define BNXT_SUPPORTS_TPA(bp)   (!BNXT_CHIP_TYPE_NITRO_A0(bp) &&        \
-                                 !(bp->flags & BNXT_FLAG_CHIP_P5) &&    \
-                                 !is_kdump_kernel())
+                                 (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+                                  (bp)->max_tpa_v2) && !is_kdump_kernel())
 
 /* Chip class phase 5 */
 #define BNXT_CHIP_P5(bp)                        \
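The bnxt.h hunk is what actually turns TPA on for the 57500 (P5) chips: previously any chip with BNXT_FLAG_CHIP_P5 was excluded from TPA outright, while the new macro only excludes a P5 chip when the firmware did not report a max_tpa_v2 capability. A small sketch of that reading follows; it is illustrative only, the helper name supports_tpa_sketch() is hypothetical, and the fields and helpers it uses are the ones referenced by the macro itself.

/* Sketch of the new BNXT_SUPPORTS_TPA() logic, spelled out step by step.
 * Illustrative only; the real check is the macro in bnxt.h above.
 */
static bool supports_tpa_sketch(struct bnxt *bp)
{
        bool not_nitro_a0 = !BNXT_CHIP_TYPE_NITRO_A0(bp);
        /* P5 (57500) chips now qualify when firmware reported TPA v2. */
        bool chip_ok = !(bp->flags & BNXT_FLAG_CHIP_P5) || bp->max_tpa_v2;

        return not_nitro_a0 && chip_ok && !is_kdump_kernel();
}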