Commit c98dfcd3 authored by Jakub Kicinski

Merge branch 'bnxt_en-Updates'

Michael Chan says:

====================
bnxt_en: Updates.

This patchset contains the following main features:

1. Add the proper logic to support suspend/resume on the new 57500
   chips.
2. Allow PHY configuration from the user on a Multihost function if
   supported by the firmware.
3. devlink NVRAM flashing support.
4. Add a couple of chip IDs, enhance PHY loopback, and provide
   more RSS contexts to VFs.

v2: Dropped the devlink info patches to address some feedback;
    they will be resubmitted for the 5.6 kernel.
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parents ab44081f d168f328
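
Note on item 3 (devlink NVRAM flashing): the new .flash_update op added below is driven from user space with the standard devlink utility. A minimal usage sketch follows; the PCI device name and package file name are illustrative only, and since the driver loads the package through the firmware loader, the file is typically expected under /lib/firmware:

    # Illustrative only: flash an NVM package on the PF via the new
    # devlink .flash_update callback (device/file names are examples).
    devlink dev flash pci/0000:af:00.0 file bnxt_fw_package.pkg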
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -250,10 +250,12 @@ static const u16 bnxt_vf_req_snif[] = {
 static const u16 bnxt_async_events_arr[] = {
         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
         ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
 };
@@ -1968,6 +1970,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
                         set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
                 }
                 /* fall through */
+        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
+        case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
+                set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
+                /* fall through */
         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
                 break;
@@ -2688,6 +2694,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
                 if (!rmem->pg_arr[i])
                         return -ENOMEM;
 
+                if (rmem->init_val)
+                        memset(rmem->pg_arr[i], rmem->init_val,
+                               rmem->page_size);
                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
                         if (i == rmem->nr_pages - 2 &&
                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -4394,53 +4403,22 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
         return rc;
 }
 
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
-                                     int bmap_size)
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
+                            bool async_only)
 {
+        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
         struct hwrm_func_drv_rgtr_input req = {0};
         DECLARE_BITMAP(async_events_bmap, 256);
         u32 *events = (u32 *)async_events_bmap;
-        int i;
-
-        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
-
-        req.enables =
-                cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
-
-        memset(async_events_bmap, 0, sizeof(async_events_bmap));
-        for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
-                u16 event_id = bnxt_async_events_arr[i];
-
-                if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
-                    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
-                        continue;
-                __set_bit(bnxt_async_events_arr[i], async_events_bmap);
-        }
-        if (bmap && bmap_size) {
-                for (i = 0; i < bmap_size; i++) {
-                        if (test_bit(i, bmap))
-                                __set_bit(i, async_events_bmap);
-                }
-        }
-
-        for (i = 0; i < 8; i++)
-                req.async_event_fwd[i] |= cpu_to_le32(events[i]);
-
-        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
-{
-        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
-        struct hwrm_func_drv_rgtr_input req = {0};
         u32 flags;
-        int rc;
+        int rc, i;
 
         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
 
         req.enables =
                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
-                            FUNC_DRV_RGTR_REQ_ENABLES_VER);
+                            FUNC_DRV_RGTR_REQ_ENABLES_VER |
+                            FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
@@ -4481,11 +4459,36 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
                 req.flags |= cpu_to_le32(
                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
 
+        memset(async_events_bmap, 0, sizeof(async_events_bmap));
+        for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+                u16 event_id = bnxt_async_events_arr[i];
+
+                if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+                    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+                        continue;
+                __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+        }
+        if (bmap && bmap_size) {
+                for (i = 0; i < bmap_size; i++) {
+                        if (test_bit(i, bmap))
+                                __set_bit(i, async_events_bmap);
+                }
+        }
+        for (i = 0; i < 8; i++)
+                req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+        if (async_only)
+                req.enables =
+                        cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
         mutex_lock(&bp->hwrm_cmd_lock);
         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-        if (!rc && (resp->flags &
-                    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
-                bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+        if (!rc) {
+                set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
+                if (resp->flags &
+                    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+                        bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+        }
         mutex_unlock(&bp->hwrm_cmd_lock);
         return rc;
 }
@@ -4494,6 +4497,9 @@ static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
 {
         struct hwrm_func_drv_unrgtr_input req = {0};
 
+        if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
+                return 0;
+
         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
@@ -6490,6 +6496,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
                         le16_to_cpu(resp->mrav_num_entries_units);
                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+                ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
         } else {
                 rc = 0;
         }
@@ -6644,7 +6651,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 
 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
-                                  u8 depth)
+                                  u8 depth, bool use_init_val)
 {
         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
         int rc;
@@ -6682,6 +6689,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
                         rmem->depth = 1;
                         rmem->nr_pages = MAX_CTX_PAGES;
+                        if (use_init_val)
+                                rmem->init_val = bp->ctx->ctx_kind_initializer;
                         if (i == (nr_tbls - 1)) {
                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -6696,6 +6705,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
                 if (rmem->nr_pages > 1 || depth)
                         rmem->depth = 1;
+                if (use_init_val)
+                        rmem->init_val = bp->ctx->ctx_kind_initializer;
                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
         }
         return rc;
@@ -6786,21 +6797,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
                           extra_qps;
         mem_size = ctx->qp_entry_size * ctx_pg->entries;
-        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
         if (rc)
                 return rc;
 
         ctx_pg = &ctx->srq_mem;
         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
         mem_size = ctx->srq_entry_size * ctx_pg->entries;
-        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
         if (rc)
                 return rc;
 
         ctx_pg = &ctx->cq_mem;
         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
         mem_size = ctx->cq_entry_size * ctx_pg->entries;
-        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
+        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
         if (rc)
                 return rc;
 
@@ -6808,14 +6819,14 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
         ctx_pg->entries = ctx->vnic_max_vnic_entries +
                           ctx->vnic_max_ring_table_entries;
         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
-        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
         if (rc)
                 return rc;
 
         ctx_pg = &ctx->stat_mem;
         ctx_pg->entries = ctx->stat_max_entries;
         mem_size = ctx->stat_entry_size * ctx_pg->entries;
-        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
         if (rc)
                 return rc;
 
@@ -6831,7 +6842,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
                 num_ah = 1024 * 128;
         ctx_pg->entries = num_mr + num_ah;
         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
-        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
+        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
         if (rc)
                 return rc;
         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
@@ -6843,7 +6854,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
         ctx_pg = &ctx->tim_mem;
         ctx_pg->entries = ctx->qp_mem.entries;
         mem_size = ctx->tim_entry_size * ctx_pg->entries;
-        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+        rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
         if (rc)
                 return rc;
         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
@@ -6857,7 +6868,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
                 ctx_pg = ctx->tqm_mem[i];
                 ctx_pg->entries = entries;
                 mem_size = ctx->tqm_entry_size * entries;
-                rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+                rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
                 if (rc)
                         return rc;
                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
@@ -8414,7 +8425,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
         bp->flags &= ~BNXT_FLAG_EEE_CAP;
         if (bp->test_info)
-                bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
+                bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
+                                          BNXT_TEST_FL_AN_PHY_LPBK);
         if (bp->hwrm_spec_code < 0x10201)
                 return 0;
@@ -8440,6 +8452,14 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
                 if (bp->test_info)
                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
         }
+        if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
+                if (bp->test_info)
+                        bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
+        }
+        if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
+                if (BNXT_PF(bp))
+                        bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
+        }
         if (resp->supported_speeds_auto_mode)
                 link_info->support_auto_speeds =
                         le16_to_cpu(resp->supported_speeds_auto_mode);
@@ -8554,7 +8574,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
         }
         mutex_unlock(&bp->hwrm_cmd_lock);
 
-        if (!BNXT_SINGLE_PF(bp))
+        if (!BNXT_PHY_CFG_ABLE(bp))
                 return 0;
 
         diff = link_info->support_auto_speeds ^ link_info->advertising;
@@ -10239,6 +10259,31 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
 
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
+static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
+{
+        struct bnxt_link_info *link_info = &bp->link_info;
+
+        if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+                link_info->autoneg = BNXT_AUTONEG_SPEED;
+                if (bp->hwrm_spec_code >= 0x10201) {
+                        if (link_info->auto_pause_setting &
+                            PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+                                link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+                } else {
+                        link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+                }
+                link_info->advertising = link_info->auto_link_speeds;
+        } else {
+                link_info->req_link_speed = link_info->force_link_speed;
+                link_info->req_duplex = link_info->duplex_setting;
+        }
+        if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+                link_info->req_flow_ctrl =
+                        link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+        else
+                link_info->req_flow_ctrl = link_info->force_pause_setting;
+}
+
 static void bnxt_sp_task(struct work_struct *work)
 {
         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -10289,6 +10334,10 @@ static void bnxt_sp_task(struct work_struct *work)
                                        &bp->sp_event))
                         bnxt_hwrm_phy_qcaps(bp);
 
+                if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
+                                       &bp->sp_event))
+                        bnxt_init_ethtool_link_settings(bp);
+
                 rc = bnxt_update_link(bp, true);
                 mutex_unlock(&bp->link_lock);
                 if (rc)
@@ -10484,11 +10533,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
                 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
                             rc);
 
-        rc = bnxt_hwrm_func_drv_rgtr(bp);
-        if (rc)
-                return -ENODEV;
-
-        rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+        rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
         if (rc)
                 return -ENODEV;
 
@@ -11405,26 +11450,7 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
         if (!fw_dflt)
                 return 0;
 
-        /*initialize the ethool setting copy with NVM settings */
-        if (BNXT_AUTO_MODE(link_info->auto_mode)) {
-                link_info->autoneg = BNXT_AUTONEG_SPEED;
-                if (bp->hwrm_spec_code >= 0x10201) {
-                        if (link_info->auto_pause_setting &
-                            PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
-                                link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
-                } else {
-                        link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
-                }
-                link_info->advertising = link_info->auto_link_speeds;
-        } else {
-                link_info->req_link_speed = link_info->force_link_speed;
-                link_info->req_duplex = link_info->duplex_setting;
-        }
-        if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
-                link_info->req_flow_ctrl =
-                        link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
-        else
-                link_info->req_flow_ctrl = link_info->force_pause_setting;
+        bnxt_init_ethtool_link_settings(bp);
         return 0;
 }
@@ -11864,6 +11890,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+        bnxt_hwrm_func_drv_unrgtr(bp);
         bnxt_free_hwrm_short_cmd_req(bp);
         bnxt_free_hwrm_resources(bp);
         bnxt_free_ctx_mem(bp);
@@ -11921,6 +11948,10 @@ static int bnxt_suspend(struct device *device)
                 rc = bnxt_close(dev);
         }
         bnxt_hwrm_func_drv_unrgtr(bp);
+        pci_disable_device(bp->pdev);
+        bnxt_free_ctx_mem(bp);
+        kfree(bp->ctx);
+        bp->ctx = NULL;
         rtnl_unlock();
         return rc;
 }
@@ -11932,7 +11963,14 @@ static int bnxt_resume(struct device *device)
         int rc = 0;
 
         rtnl_lock();
-        if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+        rc = pci_enable_device(bp->pdev);
+        if (rc) {
+                netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
+                           rc);
+                goto resume_exit;
+        }
+        pci_set_master(bp->pdev);
+        if (bnxt_hwrm_ver_get(bp)) {
                 rc = -ENODEV;
                 goto resume_exit;
         }
@@ -11941,6 +11979,26 @@ static int bnxt_resume(struct device *device)
                 rc = -EBUSY;
                 goto resume_exit;
         }
+
+        if (bnxt_hwrm_queue_qportcfg(bp)) {
+                rc = -ENODEV;
+                goto resume_exit;
+        }
+
+        if (bp->hwrm_spec_code >= 0x10803) {
+                if (bnxt_alloc_ctx_mem(bp)) {
+                        rc = -ENODEV;
+                        goto resume_exit;
+                }
+        }
+        if (BNXT_NEW_RM(bp))
+                bnxt_hwrm_func_resc_qcaps(bp, false);
+
+        if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
+                rc = -ENODEV;
+                goto resume_exit;
+        }
+
         bnxt_get_wol_settings(bp);
         if (netif_running(dev)) {
                 rc = bnxt_open(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -721,6 +721,7 @@ struct bnxt_ring_mem_info {
 #define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
 
         u16             depth;
+        u8              init_val;
 
         void            **pg_arr;
         dma_addr_t      *dma_arr;
@@ -1226,6 +1227,7 @@ struct bnxt_test_info {
         u8 offline_mask;
         u8 flags;
 #define BNXT_TEST_FL_EXT_LPBK		0x1
+#define BNXT_TEST_FL_AN_PHY_LPBK	0x2
         u16 timeout;
         char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
 };
@@ -1352,6 +1354,7 @@ struct bnxt_ctx_mem_info {
         u32     tim_max_entries;
         u16     mrav_num_entries_units;
         u8      tqm_entries_multiple;
+        u8      ctx_kind_initializer;
 
         u32     flags;
         #define BNXT_CTX_FLAG_INITED	0x01
@@ -1443,6 +1446,8 @@ struct bnxt {
 #define CHIP_NUM_57414L		0x16db
 
 #define CHIP_NUM_5745X		0xd730
+#define CHIP_NUM_57452		0xc452
+#define CHIP_NUM_57454		0xc454
 
 #define CHIP_NUM_57508		0x1750
 #define CHIP_NUM_57504		0x1751
@@ -1475,7 +1480,10 @@ struct bnxt {
          (chip_num) == CHIP_NUM_58700)
 
 #define BNXT_CHIP_NUM_5745X(chip_num)		\
-	((chip_num) == CHIP_NUM_5745X)
+	((chip_num) == CHIP_NUM_5745X ||	\
+	 (chip_num) == CHIP_NUM_57452 ||	\
+	 (chip_num) == CHIP_NUM_57454)
 
 #define BNXT_CHIP_NUM_57X0X(chip_num)		\
 	(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
@@ -1540,6 +1548,8 @@ struct bnxt {
 #define BNXT_NPAR(bp)		((bp)->port_partition_type)
 #define BNXT_MH(bp)		((bp)->flags & BNXT_FLAG_MULTI_HOST)
 #define BNXT_SINGLE_PF(bp)	(BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+#define BNXT_PHY_CFG_ABLE(bp)	(BNXT_SINGLE_PF(bp) ||			\
+				 ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG))
 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
 #define BNXT_RX_PAGE_MODE(bp)	((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
 #define BNXT_SUPPORTS_TPA(bp)	(!BNXT_CHIP_TYPE_NITRO_A0(bp) &&	\
@@ -1641,6 +1651,7 @@ struct bnxt {
 #define BNXT_STATE_IN_FW_RESET	4
 #define BNXT_STATE_ABORT_ERR	5
 #define BNXT_STATE_FW_FATAL_COND	6
+#define BNXT_STATE_DRV_REGISTERED	7
 
         struct bnxt_irq *irq_tbl;
         int             total_irqs;
@@ -1673,6 +1684,7 @@ struct bnxt {
         #define BNXT_FW_CAP_EXT_STATS_SUPPORTED		0x00040000
         #define BNXT_FW_CAP_ERR_RECOVER_RELOAD		0x00100000
         #define BNXT_FW_CAP_HOT_RESET			0x00200000
+        #define BNXT_FW_CAP_SHARED_PORT_CFG		0x00400000
 
 #define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
         u32             hwrm_spec_code;
@@ -1754,6 +1766,7 @@ struct bnxt {
 #define BNXT_RING_COAL_NOW_SP_EVENT	17
 #define BNXT_FW_RESET_NOTIFY_SP_EVENT	18
 #define BNXT_FW_EXCEPTION_SP_EVENT	19
+#define BNXT_LINK_CFG_CHANGE_SP_EVENT	21
 
         struct delayed_work     fw_reset_task;
         int                     fw_reset_state;
@@ -1990,8 +2003,8 @@ int _hwrm_send_message(struct bnxt *, void *, u32, int);
 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
-                                     int bmap_size);
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
+                            int bmap_size, bool async_only);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
 int bnxt_nq_rings_in_use(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -14,6 +14,25 @@
 #include "bnxt.h"
 #include "bnxt_vfr.h"
 #include "bnxt_devlink.h"
+#include "bnxt_ethtool.h"
+
+static int
+bnxt_dl_flash_update(struct devlink *dl, const char *filename,
+                     const char *region, struct netlink_ext_ack *extack)
+{
+        struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+        if (region)
+                return -EOPNOTSUPP;
+
+        if (!BNXT_PF(bp)) {
+                NL_SET_ERR_MSG_MOD(extack,
+                                   "flash update not supported from a VF");
+                return -EPERM;
+        }
+
+        return bnxt_flash_package_from_file(bp->dev, filename, 0);
+}
 
 static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
                                      struct devlink_fmsg *fmsg,
@@ -225,6 +244,7 @@ static const struct devlink_ops bnxt_dl_ops = {
         .eswitch_mode_set = bnxt_dl_eswitch_mode_set,
         .eswitch_mode_get = bnxt_dl_eswitch_mode_get,
 #endif /* CONFIG_BNXT_SRIOV */
+        .flash_update     = bnxt_dl_flash_update,
 };
 
 enum bnxt_dl_param_id {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1590,7 +1590,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
         u32 speed;
         int rc = 0;
 
-        if (!BNXT_SINGLE_PF(bp))
+        if (!BNXT_PHY_CFG_ABLE(bp))
                 return -EOPNOTSUPP;
 
         mutex_lock(&bp->link_lock);
@@ -1662,7 +1662,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
         struct bnxt *bp = netdev_priv(dev);
         struct bnxt_link_info *link_info = &bp->link_info;
 
-        if (!BNXT_SINGLE_PF(bp))
+        if (!BNXT_PHY_CFG_ABLE(bp))
                 return -EOPNOTSUPP;
 
         if (epause->autoneg) {
@@ -2000,8 +2000,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
         return rc;
 }
 
-static int bnxt_flash_package_from_file(struct net_device *dev,
-                                        char *filename, u32 install_type)
+int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+                                 u32 install_type)
 {
         struct bnxt *bp = netdev_priv(dev);
         struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
@@ -2399,7 +2399,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
                 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
         int rc = 0;
 
-        if (!BNXT_SINGLE_PF(bp))
+        if (!BNXT_PHY_CFG_ABLE(bp))
                 return -EOPNOTSUPP;
 
         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
@@ -2586,7 +2586,7 @@ static int bnxt_nway_reset(struct net_device *dev)
         struct bnxt *bp = netdev_priv(dev);
         struct bnxt_link_info *link_info = &bp->link_info;
 
-        if (!BNXT_SINGLE_PF(bp))
+        if (!BNXT_PHY_CFG_ABLE(bp))
                 return -EOPNOTSUPP;
 
         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
@@ -2698,7 +2698,8 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
         u16 fw_speed;
         int rc;
 
-        if (!link_info->autoneg)
+        if (!link_info->autoneg ||
+            (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
                 return 0;
 
         rc = bnxt_query_force_speeds(bp, &fw_advertising);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -81,6 +81,8 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
 u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
 u32 bnxt_fw_to_ethtool_speed(u16);
 u16 bnxt_get_fw_auto_link_speeds(u32);
+int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+                                 u32 install_type);
 void bnxt_ethtool_init(struct bnxt *bp);
 void bnxt_ethtool_free(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -515,6 +515,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
         struct bnxt_pf_info *pf = &bp->pf;
         int i, rc = 0, min = 1;
         u16 vf_msix = 0;
+        u16 vf_rss;
 
         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
@@ -533,9 +534,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
         vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
         vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
         vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+        vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
 
         req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
-        req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
         if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                 min = 0;
                 req.min_rsscos_ctx = cpu_to_le16(min);
@@ -557,6 +558,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
                 vf_vnics /= num_vfs;
                 vf_stat_ctx /= num_vfs;
                 vf_ring_grps /= num_vfs;
+                vf_rss /= num_vfs;
 
                 req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                 req.min_tx_rings = cpu_to_le16(vf_tx_rings);
@@ -565,6 +567,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
                 req.min_vnics = cpu_to_le16(vf_vnics);
                 req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                 req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+                req.min_rsscos_ctx = cpu_to_le16(vf_rss);
         }
         req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
         req.max_tx_rings = cpu_to_le16(vf_tx_rings);
@@ -573,6 +576,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
         req.max_vnics = cpu_to_le16(vf_vnics);
         req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
         req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+        req.max_rsscos_ctx = cpu_to_le16(vf_rss);
         if (bp->flags & BNXT_FLAG_CHIP_P5)
                 req.max_msix = cpu_to_le16(vf_msix / num_vfs);
@@ -598,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
                 hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
                                              n;
                 hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
-                hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+                hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
                 hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
                 hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
                 if (bp->flags & BNXT_FLAG_CHIP_P5)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -81,7 +81,7 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
                 edev->en_ops->bnxt_free_msix(edev, ulp_id);
 
         if (ulp->max_async_event_id)
-                bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+                bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
 
         RCU_INIT_POINTER(ulp->ulp_ops, NULL);
         synchronize_rcu();
@@ -441,7 +441,7 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
         /* Make sure bnxt_ulp_async_events() sees this order */
         smp_wmb();
         ulp->max_async_event_id = max_id;
-        bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
+        bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
         return 0;
 }