Commit 6a26ef9b authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Net-next updates.

This series has 3 main features.  The first is to add mqprio TC to
hardware queue mapping to avoid reprogramming hardware CoS queue
watermarks during run-time.  The second is DIM improvements from
Andy Gospo.  The third is some improvements to VF resource allocations
when supporting large numbers of VFs with more limited resources.

There are some additional minor improvements and a new function level
discard counter.

v2: Fixed EEPROM typo noted by Andrew Lunn.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c347b927 47558acd
...@@ -2,3 +2,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o ...@@ -2,3 +2,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
...@@ -62,6 +62,7 @@ ...@@ -62,6 +62,7 @@
#include "bnxt_vfr.h" #include "bnxt_vfr.h"
#include "bnxt_tc.h" #include "bnxt_tc.h"
#include "bnxt_devlink.h" #include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ) #define BNXT_TX_TIMEOUT (5 * HZ)
...@@ -2383,6 +2384,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) ...@@ -2383,6 +2384,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring; struct bnxt_ring_struct *ring;
u8 qidx;
ring = &txr->tx_ring_struct; ring = &txr->tx_ring_struct;
...@@ -2411,7 +2413,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) ...@@ -2411,7 +2413,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
} }
ring->queue_id = bp->q_info[j].queue_id; qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
if (i < bp->tx_nr_rings_xdp) if (i < bp->tx_nr_rings_xdp)
continue; continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
...@@ -3493,15 +3496,29 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, ...@@ -3493,15 +3496,29 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (!timeout) if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT; timeout = DFLT_HWRM_CMD_TIMEOUT;
/* convert timeout to usec */
timeout *= 1000;
i = 0; i = 0;
tmo_count = timeout * 40; /* Short timeout for the first few iterations:
* number of loops = number of loops for short timeout +
* number of loops for standard timeout.
*/
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
if (intr_process) { if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */ /* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
i++ < tmo_count) { i++ < tmo_count) {
usleep_range(25, 40); /* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
else
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
} }
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
...@@ -3519,7 +3536,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, ...@@ -3519,7 +3536,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
HWRM_RESP_LEN_SFT; HWRM_RESP_LEN_SFT;
if (len) if (len)
break; break;
usleep_range(25, 40); /* on first few passes, just barely sleep */
if (i < DFLT_HWRM_CMD_TIMEOUT)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
else
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
} }
if (i >= tmo_count) { if (i >= tmo_count) {
...@@ -4334,26 +4357,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, ...@@ -4334,26 +4357,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
mutex_unlock(&bp->hwrm_cmd_lock); mutex_unlock(&bp->hwrm_cmd_lock);
if (rc || err) { if (rc || err) {
switch (ring_type) { netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
case RING_FREE_REQ_RING_TYPE_L2_CMPL: ring_type, rc, err);
netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", return -EIO;
rc, err);
return -1;
case RING_FREE_REQ_RING_TYPE_RX:
netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
rc, err);
return -1;
case RING_FREE_REQ_RING_TYPE_TX:
netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
rc, err);
return -1;
default:
netdev_err(bp->dev, "Invalid ring\n");
return -1;
}
} }
ring->fw_ring_id = ring_id; ring->fw_ring_id = ring_id;
return rc; return rc;
...@@ -4477,23 +4483,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, ...@@ -4477,23 +4483,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
mutex_unlock(&bp->hwrm_cmd_lock); mutex_unlock(&bp->hwrm_cmd_lock);
if (rc || error_code) { if (rc || error_code) {
switch (ring_type) { netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
case RING_FREE_REQ_RING_TYPE_L2_CMPL: ring_type, rc, error_code);
netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", return -EIO;
rc);
return rc;
case RING_FREE_REQ_RING_TYPE_RX:
netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case RING_FREE_REQ_RING_TYPE_TX:
netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
netdev_err(bp->dev, "Invalid ring\n");
return -1;
}
} }
return 0; return 0;
} }
...@@ -4721,6 +4713,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, ...@@ -4721,6 +4713,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics); cp_rings, vnics);
req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) if (rc)
return -ENOMEM; return -ENOMEM;
...@@ -5309,6 +5305,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) ...@@ -5309,6 +5305,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
for (i = 0; i < bp->max_tc; i++) { for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++; bp->q_info[i].queue_id = *qptr++;
bp->q_info[i].queue_profile = *qptr++; bp->q_info[i].queue_profile = *qptr++;
bp->tc_to_qidx[i] = i;
} }
qportcfg_exit: qportcfg_exit:
...@@ -5376,7 +5373,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) ...@@ -5376,7 +5373,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
struct tm tm; struct tm tm;
time64_t now = ktime_get_real_seconds(); time64_t now = ktime_get_real_seconds();
if (bp->hwrm_spec_code < 0x10400) if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
bp->hwrm_spec_code < 0x10400)
return -EOPNOTSUPP; return -EOPNOTSUPP;
time64_to_tm(now, 0, &tm); time64_to_tm(now, 0, &tm);
...@@ -5958,6 +5956,9 @@ static int bnxt_init_msix(struct bnxt *bp) ...@@ -5958,6 +5956,9 @@ static int bnxt_init_msix(struct bnxt *bp)
if (total_vecs > max) if (total_vecs > max)
total_vecs = max; total_vecs = max;
if (!total_vecs)
return 0;
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent) if (!msix_ent)
return -ENOMEM; return -ENOMEM;
...@@ -6843,6 +6844,8 @@ static void bnxt_preset_reg_win(struct bnxt *bp) ...@@ -6843,6 +6844,8 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
} }
} }
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{ {
int rc = 0; int rc = 0;
...@@ -6850,6 +6853,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) ...@@ -6850,6 +6853,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_preset_reg_win(bp); bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev); netif_carrier_off(bp->dev);
if (irq_re_init) { if (irq_re_init) {
/* Reserve rings now if none were reserved at driver probe. */
rc = bnxt_init_dflt_ring_mode(bp);
if (rc) {
netdev_err(bp->dev, "Failed to reserve default rings at open\n");
return rc;
}
rc = bnxt_reserve_rings(bp); rc = bnxt_reserve_rings(bp);
if (rc) if (rc)
return rc; return rc;
...@@ -6877,6 +6886,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) ...@@ -6877,6 +6886,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
} }
bnxt_enable_napi(bp); bnxt_enable_napi(bp);
bnxt_debug_dev_init(bp);
rc = bnxt_init_nic(bp, irq_re_init); rc = bnxt_init_nic(bp, irq_re_init);
if (rc) { if (rc) {
...@@ -6909,6 +6919,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) ...@@ -6909,6 +6919,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
return 0; return 0;
open_err: open_err:
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp); bnxt_disable_napi(bp);
bnxt_del_napi(bp); bnxt_del_napi(bp);
...@@ -7002,6 +7013,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, ...@@ -7002,6 +7013,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp); bnxt_disable_napi(bp);
del_timer_sync(&bp->timer); del_timer_sync(&bp->timer);
bnxt_free_skbs(bp); bnxt_free_skbs(bp);
...@@ -7279,6 +7291,25 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) ...@@ -7279,6 +7291,25 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
return rc; return rc;
} }
static bool bnxt_can_reserve_rings(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
/* No minimum rings were provisioned by the PF. Don't
* reserve rings by default when device is down.
*/
if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
return true;
if (!netif_running(bp->dev))
return false;
}
#endif
return true;
}
/* If the chip and firmware supports RFS */ /* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp) static bool bnxt_rfs_supported(struct bnxt *bp)
{ {
...@@ -7295,7 +7326,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) ...@@ -7295,7 +7326,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs; int vnics, max_vnics, max_rss_ctxs;
if (!(bp->flags & BNXT_FLAG_MSIX_CAP)) if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false; return false;
vnics = 1 + bp->rx_nr_rings; vnics = 1 + bp->rx_nr_rings;
...@@ -7729,7 +7760,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) ...@@ -7729,7 +7760,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_bufs = 30; coal->coal_bufs = 30;
coal->coal_ticks_irq = 1; coal->coal_ticks_irq = 1;
coal->coal_bufs_irq = 2; coal->coal_bufs_irq = 2;
coal->idle_thresh = 25; coal->idle_thresh = 50;
coal->bufs_per_record = 2; coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */ coal->budget = 64; /* NAPI budget */
...@@ -8529,6 +8560,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) ...@@ -8529,6 +8560,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{ {
int dflt_rings, max_rx_rings, max_tx_rings, rc; int dflt_rings, max_rx_rings, max_tx_rings, rc;
if (!bnxt_can_reserve_rings(bp))
return 0;
if (sh) if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS; bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues(); dflt_rings = netif_get_num_default_rss_queues();
...@@ -8574,6 +8608,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) ...@@ -8574,6 +8608,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
return rc; return rc;
} }
static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
int rc;
if (bp->tx_nr_rings)
return 0;
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
return rc;
}
rc = bnxt_init_int_mode(bp);
if (rc)
return rc;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
bp->dev->features |= NETIF_F_NTUPLE;
}
return 0;
}
int bnxt_restore_pf_fw_resources(struct bnxt *bp) int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{ {
int rc; int rc;
...@@ -9078,6 +9135,7 @@ static struct pci_driver bnxt_pci_driver = { ...@@ -9078,6 +9135,7 @@ static struct pci_driver bnxt_pci_driver = {
static int __init bnxt_init(void) static int __init bnxt_init(void)
{ {
bnxt_debug_init();
return pci_register_driver(&bnxt_pci_driver); return pci_register_driver(&bnxt_pci_driver);
} }
...@@ -9086,6 +9144,7 @@ static void __exit bnxt_exit(void) ...@@ -9086,6 +9144,7 @@ static void __exit bnxt_exit(void)
pci_unregister_driver(&bnxt_pci_driver); pci_unregister_driver(&bnxt_pci_driver);
if (bnxt_pf_wq) if (bnxt_pf_wq)
destroy_workqueue(bnxt_pf_wq); destroy_workqueue(bnxt_pf_wq);
bnxt_debug_exit();
} }
module_init(bnxt_init); module_init(bnxt_init);
......
...@@ -532,6 +532,12 @@ struct rx_tpa_end_cmp_ext { ...@@ -532,6 +532,12 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE) BNXT_HWRM_REQ_MAX_SIZE)
#define HWRM_SHORT_MIN_TIMEOUT 3
#define HWRM_SHORT_MAX_TIMEOUT 10
#define HWRM_SHORT_TIMEOUT_COUNTER 5
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
#define BNXT_RX_EVENT 1 #define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2 #define BNXT_AGG_EVENT 2
...@@ -1242,6 +1248,7 @@ struct bnxt { ...@@ -1242,6 +1248,7 @@ struct bnxt {
u8 max_tc; u8 max_tc;
u8 max_lltc; /* lossless TCs */ u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
u8 tc_to_qidx[BNXT_MAX_QUEUE];
unsigned int current_interval; unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ #define BNXT_TIMER_INTERVAL HZ
...@@ -1384,6 +1391,8 @@ struct bnxt { ...@@ -1384,6 +1391,8 @@ struct bnxt {
u16 *cfa_code_map; /* cfa_code -> vf_idx map */ u16 *cfa_code_map; /* cfa_code -> vf_idx map */
u8 switch_id[8]; u8 switch_id[8];
struct bnxt_tc_info *tc_info; struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
struct dentry *debugfs_dim;
}; };
#define BNXT_RX_STATS_OFFSET(counter) \ #define BNXT_RX_STATS_OFFSET(counter) \
......
...@@ -21,6 +21,21 @@ ...@@ -21,6 +21,21 @@
#include "bnxt_dcb.h" #include "bnxt_dcb.h"
#ifdef CONFIG_BNXT_DCB #ifdef CONFIG_BNXT_DCB
static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
{
int i, j;
for (i = 0; i < bp->max_tc; i++) {
if (bp->q_info[i].queue_id == queue_id) {
for (j = 0; j < bp->max_tc; j++) {
if (bp->tc_to_qidx[j] == i)
return j;
}
}
}
return -EINVAL;
}
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{ {
struct hwrm_queue_pri2cos_cfg_input req = {0}; struct hwrm_queue_pri2cos_cfg_input req = {0};
...@@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) ...@@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
pri2cos = &req.pri0_cos_queue_id; pri2cos = &req.pri0_cos_queue_id;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 qidx;
req.enables |= cpu_to_le32( req.enables |= cpu_to_le32(
QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id; qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = bp->q_info[qidx].queue_id;
} }
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return rc; return rc;
...@@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) ...@@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) { if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id; u8 *pri2cos = &resp->pri0_cos_queue_id;
int i, j; int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 queue_id = pri2cos[i]; u8 queue_id = pri2cos[i];
int tc;
for (j = 0; j < bp->max_tc; j++) { tc = bnxt_queue_to_tc(bp, queue_id);
if (bp->q_info[j].queue_id == queue_id) { if (tc >= 0)
ets->prio_tc[i] = j; ets->prio_tc[i] = tc;
break;
}
}
} }
} }
mutex_unlock(&bp->hwrm_cmd_lock); mutex_unlock(&bp->hwrm_cmd_lock);
...@@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, ...@@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
void *data; void *data;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
data = &req.unused_0; for (i = 0; i < max_tc; i++) {
for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) { u8 qidx;
req.enables |= cpu_to_le32( req.enables |= cpu_to_le32(
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i); QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
memset(&cos2bw, 0, sizeof(cos2bw)); memset(&cos2bw, 0, sizeof(cos2bw));
cos2bw.queue_id = bp->q_info[i].queue_id; qidx = bp->tc_to_qidx[i];
cos2bw.queue_id = bp->q_info[qidx].queue_id;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa = cos2bw.tsa =
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP; QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
...@@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, ...@@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) | cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100); BW_VALUE_UNIT_PERCENT1_100);
} }
data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (i == 0) { if (qidx == 0) {
req.queue_id0 = cos2bw.queue_id; req.queue_id0 = cos2bw.queue_id;
req.unused_0 = 0; req.unused_0 = 0;
} }
...@@ -132,66 +151,81 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) ...@@ -132,66 +151,81 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
int j; int tc;
memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4); memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
if (i == 0) if (i == 0)
cos2bw.queue_id = resp->queue_id0; cos2bw.queue_id = resp->queue_id0;
for (j = 0; j < bp->max_tc; j++) { tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
if (bp->q_info[j].queue_id != cos2bw.queue_id) if (tc < 0)
continue; continue;
if (cos2bw.tsa == if (cos2bw.tsa ==
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) { QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT; ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
} else { } else {
ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS; ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
ets->tc_tx_bw[j] = cos2bw.bw_weight; ets->tc_tx_bw[tc] = cos2bw.bw_weight;
}
} }
} }
mutex_unlock(&bp->hwrm_cmd_lock); mutex_unlock(&bp->hwrm_cmd_lock);
return 0; return 0;
} }
static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask) static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{ {
struct hwrm_queue_cfg_input req = {0}; unsigned long qmap = 0;
int i; int max = bp->max_tc;
int i, j, rc;
if (netif_running(bp->dev)) /* Assign lossless TCs first */
bnxt_tx_disable(bp); for (i = 0, j = 0; i < max; ) {
if (lltc_mask & (1 << i)) {
if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
bp->tc_to_qidx[i] = j;
__set_bit(j, &qmap);
i++;
}
j++;
continue;
}
i++;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1); for (i = 0, j = 0; i < max; i++) {
req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR); if (lltc_mask & (1 << i))
req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE); continue;
j = find_next_zero_bit(&qmap, max, j);
bp->tc_to_qidx[i] = j;
__set_bit(j, &qmap);
j++;
}
/* Configure lossless queues to lossy first */ if (netif_running(bp->dev)) {
req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY; bnxt_close_nic(bp, false, false);
for (i = 0; i < bp->max_tc; i++) { rc = bnxt_open_nic(bp, false, false);
if (BNXT_LLQ(bp->q_info[i].queue_profile)) { if (rc) {
req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
hwrm_send_message(bp, &req, sizeof(req), return rc;
HWRM_CMD_TIMEOUT);
bp->q_info[i].queue_profile =
QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
} }
} }
if (bp->ieee_ets) {
int tc = netdev_get_num_tc(bp->dev);
/* Now configure desired queues to lossless */ if (!tc)
req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; tc = 1;
for (i = 0; i < bp->max_tc; i++) { rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
if (lltc_mask & (1 << i)) { if (rc) {
req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
hwrm_send_message(bp, &req, sizeof(req), return rc;
HWRM_CMD_TIMEOUT); }
bp->q_info[i].queue_profile = rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; if (rc) {
netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
return rc;
} }
} }
if (netif_running(bp->dev))
bnxt_tx_enable(bp);
return 0; return 0;
} }
...@@ -201,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) ...@@ -201,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
struct ieee_ets *my_ets = bp->ieee_ets; struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0; unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0; u8 i, pri, lltc_count = 0;
bool need_q_recfg = false; bool need_q_remap = false;
int rc; int rc;
if (!my_ets) if (!my_ets)
...@@ -221,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) ...@@ -221,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
if (lltc_count > bp->max_lltc) if (lltc_count > bp->max_lltc)
return -EINVAL; return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return rc;
for (i = 0; i < bp->max_tc; i++) { for (i = 0; i < bp->max_tc; i++) {
if (tc_mask & (1 << i)) { if (tc_mask & (1 << i)) {
if (!BNXT_LLQ(bp->q_info[i].queue_profile)) u8 qidx = bp->tc_to_qidx[i];
need_q_recfg = true;
if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
need_q_remap = true;
break;
} }
} }
}
if (need_q_remap)
rc = bnxt_queue_remap(bp, tc_mask);
if (need_q_recfg) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
rc = bnxt_hwrm_queue_cfg(bp, tc_mask); req.flags = cpu_to_le32(pri_mask);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return rc;
return rc; return rc;
} }
......
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2017-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "bnxt_hsi.h"
#include <linux/net_dim.h>
#include "bnxt.h"
#include "bnxt_debugfs.h"
static struct dentry *bnxt_debug_mnt;
/* Dump the current net_dim state for one completion ring as text.
 * The file is single-shot: a non-zero *ppos means the whole report was
 * already delivered, so return EOF rather than re-emitting it.
 */
static ssize_t debugfs_dim_read(struct file *filep,
				char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct net_dim *dim = filep->private_data;
	size_t total;
	char *kbuf;
	int rlen;

	if (*ppos)
		return 0;

	if (!dim)
		return -ENODEV;

	kbuf = kasprintf(GFP_KERNEL,
			 "state = %d\n"
			 "profile_ix = %d\n"
			 "mode = %d\n"
			 "tune_state = %d\n"
			 "steps_right = %d\n"
			 "steps_left = %d\n"
			 "tired = %d\n",
			 dim->state,
			 dim->profile_ix,
			 dim->mode,
			 dim->tune_state,
			 dim->steps_right,
			 dim->steps_left,
			 dim->tired);
	if (!kbuf)
		return -ENOMEM;

	/* Refuse to split the report across reads: all-or-nothing. */
	total = strlen(kbuf);
	if (count < total) {
		kfree(kbuf);
		return -ENOSPC;
	}

	rlen = simple_read_from_buffer(buffer, count, ppos, kbuf, total);
	kfree(kbuf);
	return rlen;
}
/* Read-only fops for the per-ring "dim" debugfs files.  simple_open()
 * publishes inode->i_private (the ring's struct net_dim, installed by
 * debugfs_create_file) as file->private_data for debugfs_dim_read().
 */
static const struct file_operations debugfs_dim_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = debugfs_dim_read,
};
/* Create one debugfs file, named after the ring index, under directory @dd.
 * @dim is stashed as the file's private data so reads can find the ring's
 * DIM state.  Returns the new dentry, or NULL/ERR_PTR on failure.
 *
 * Fixes vs. original: the name buffer was function-local "static" (needless
 * persistent storage and not reentrant) and snprintf used a magic bound of
 * 10 that disagreed with the 16-byte buffer.  debugfs_create_file() copies
 * the name, so an automatic buffer sized with sizeof() is correct.
 */
static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx,
					    struct dentry *dd)
{
	char qname[16];

	snprintf(qname, sizeof(qname), "%d", ring_idx);
	return debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
/* Build the per-device debugfs tree: <root>/<pci-name>/dim/<ring_idx>,
 * one file for each completion ring that services an rx ring.  Failures
 * are logged and tolerated; debugfs is purely diagnostic.
 */
void bnxt_debug_dev_init(struct bnxt *bp)
{
	const char *pname = pci_name(bp->pdev);
	struct dentry *dir;
	int i;

	bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
	if (!bp->debugfs_pdev) {
		pr_err("failed to create debugfs entry %s\n", pname);
		return;
	}

	dir = debugfs_create_dir("dim", bp->debugfs_pdev);
	if (!dir) {
		pr_err("failed to create debugfs entry %s/dim\n",
		       pname);
		return;
	}
	bp->debugfs_dim = dir;

	/* create files for each rx ring */
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;

		if (!cpr || !bp->bnapi[i]->rx_ring)
			continue;

		if (!debugfs_dim_ring_init(&cpr->dim, i, bp->debugfs_dim))
			pr_err("failed to create debugfs entry %s/dim/%d\n",
			       pname, i);
	}
}
/* Tear down the per-device debugfs tree.  Safe to call when the tree was
 * never created: debugfs_remove_recursive() ignores a NULL dentry.
 */
void bnxt_debug_dev_exit(struct bnxt *bp)
{
	if (!bp)
		return;

	debugfs_remove_recursive(bp->debugfs_pdev);
	bp->debugfs_pdev = NULL;
}
/* Create the driver-wide debugfs directory "bnxt_en" at the debugfs root
 * (typically /sys/kernel/debug).  Called once from module init; each
 * device's directory is created beneath it by bnxt_debug_dev_init().
 * On failure the root stays NULL and per-device setup silently degrades.
 */
void bnxt_debug_init(void)
{
bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
if (!bnxt_debug_mnt)
pr_err("failed to init bnxt_en debugfs\n");
}
/* Remove the driver-wide debugfs directory and everything beneath it.
 * debugfs_remove_recursive() is a no-op on NULL, so this is safe even
 * when bnxt_debug_init() failed to create the directory.
 */
void bnxt_debug_exit(void)
{
debugfs_remove_recursive(bnxt_debug_mnt);
}
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

/* Include guard added: the header previously had none, so double inclusion
 * would redefine the inline stubs below when CONFIG_DEBUG_FS is off.
 */
#ifndef BNXT_DEBUGFS_H
#define BNXT_DEBUGFS_H

#include "bnxt_hsi.h"
#include "bnxt.h"

#ifdef CONFIG_DEBUG_FS
/* Module-wide debugfs root setup/teardown (module init/exit). */
void bnxt_debug_init(void);
void bnxt_debug_exit(void);
/* Per-device debugfs tree setup/teardown (device open/close). */
void bnxt_debug_dev_init(struct bnxt *bp);
void bnxt_debug_dev_exit(struct bnxt *bp);
#else
/* No-op stubs so callers need no CONFIG_DEBUG_FS conditionals. */
static inline void bnxt_debug_init(void) {}
static inline void bnxt_debug_exit(void) {}
static inline void bnxt_debug_dev_init(struct bnxt *bp) {}
static inline void bnxt_debug_dev_exit(struct bnxt *bp) {}
#endif

#endif /* BNXT_DEBUGFS_H */
...@@ -140,6 +140,19 @@ static int bnxt_set_coalesce(struct net_device *dev, ...@@ -140,6 +140,19 @@ static int bnxt_set_coalesce(struct net_device *dev,
#define BNXT_RX_STATS_EXT_ENTRY(counter) \ #define BNXT_RX_STATS_EXT_ENTRY(counter) \
{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
};
static struct {
u64 counter;
char string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
{0, "rx_total_discard_pkts"},
{0, "tx_total_discard_pkts"},
};
static const struct { static const struct {
long offset; long offset;
char string[ETH_GSTRING_LEN]; char string[ETH_GSTRING_LEN];
...@@ -237,6 +250,7 @@ static const struct { ...@@ -237,6 +250,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
}; };
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr) #define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
...@@ -244,6 +258,8 @@ static int bnxt_get_num_stats(struct bnxt *bp) ...@@ -244,6 +258,8 @@ static int bnxt_get_num_stats(struct bnxt *bp)
{ {
int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
num_stats += BNXT_NUM_SW_FUNC_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS) if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS; num_stats += BNXT_NUM_PORT_STATS;
...@@ -279,6 +295,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, ...@@ -279,6 +295,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
if (!bp->bnapi) if (!bp->bnapi)
return; return;
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
for (i = 0; i < bp->cp_nr_rings; i++) { for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
...@@ -288,7 +307,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, ...@@ -288,7 +307,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (k = 0; k < stat_fields; j++, k++) for (k = 0; k < stat_fields; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]); buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors; buf[j++] = cpr->rx_l4_csum_errors;
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
} }
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
buf[j] = bnxt_sw_func_stats[i].counter;
if (bp->flags & BNXT_FLAG_PORT_STATS) { if (bp->flags & BNXT_FLAG_PORT_STATS) {
__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
...@@ -359,6 +387,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) ...@@ -359,6 +387,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
sprintf(buf, "[%d]: rx_l4_csum_errors", i); sprintf(buf, "[%d]: rx_l4_csum_errors", i);
buf += ETH_GSTRING_LEN; buf += ETH_GSTRING_LEN;
} }
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
strcpy(buf, bnxt_sw_func_stats[i].string);
buf += ETH_GSTRING_LEN;
}
if (bp->flags & BNXT_FLAG_PORT_STATS) { if (bp->flags & BNXT_FLAG_PORT_STATS) {
for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
strcpy(buf, bnxt_port_stats_arr[i].string); strcpy(buf, bnxt_port_stats_arr[i].string);
...@@ -551,6 +584,8 @@ static int bnxt_set_channels(struct net_device *dev, ...@@ -551,6 +584,8 @@ static int bnxt_set_channels(struct net_device *dev,
* to renable * to renable
*/ */
} }
} else {
rc = bnxt_reserve_rings(bp);
} }
return rc; return rc;
...@@ -1785,6 +1820,11 @@ static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) ...@@ -1785,6 +1820,11 @@ static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
static int bnxt_get_eeprom_len(struct net_device *dev) static int bnxt_get_eeprom_len(struct net_device *dev)
{ {
struct bnxt *bp = netdev_priv(dev);
if (BNXT_VF(bp))
return 0;
/* The -1 return value allows the entire 32-bit range of offsets to be /* The -1 return value allows the entire 32-bit range of offsets to be
* passed via the ethtool command-line utility. * passed via the ethtool command-line utility.
*/ */
......
...@@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) ...@@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
vf_vnics = hw_resc->max_vnics - bp->nr_vnics; vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.min_rsscos_ctx = cpu_to_le16(1); req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
req.max_rsscos_ctx = cpu_to_le16(1); req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
req.min_cmpl_rings = cpu_to_le16(1); req.min_cmpl_rings = cpu_to_le16(1);
req.min_tx_rings = cpu_to_le16(1); req.min_tx_rings = cpu_to_le16(1);
req.min_rx_rings = cpu_to_le16(1); req.min_rx_rings = cpu_to_le16(1);
req.min_l2_ctxs = cpu_to_le16(1); req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
req.min_vnics = cpu_to_le16(1); req.min_vnics = cpu_to_le16(1);
req.min_stat_ctx = cpu_to_le16(1); req.min_stat_ctx = cpu_to_le16(1);
req.min_hw_ring_grps = cpu_to_le16(1); req.min_hw_ring_grps = cpu_to_le16(1);
...@@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) ...@@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.min_tx_rings = cpu_to_le16(vf_tx_rings); req.min_tx_rings = cpu_to_le16(vf_tx_rings);
req.min_rx_rings = cpu_to_le16(vf_rx_rings); req.min_rx_rings = cpu_to_le16(vf_rx_rings);
req.min_l2_ctxs = cpu_to_le16(4); req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.min_vnics = cpu_to_le16(vf_vnics); req.min_vnics = cpu_to_le16(vf_vnics);
req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
...@@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) ...@@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.max_tx_rings = cpu_to_le16(vf_tx_rings); req.max_tx_rings = cpu_to_le16(vf_tx_rings);
req.max_rx_rings = cpu_to_le16(vf_rx_rings); req.max_rx_rings = cpu_to_le16(vf_rx_rings);
req.max_l2_ctxs = cpu_to_le16(4); req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.max_vnics = cpu_to_le16(vf_vnics); req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
...@@ -809,6 +809,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, ...@@ -809,6 +809,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input req = {0}; struct hwrm_fwd_resp_input req = {0};
struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
/* Set the new target id */ /* Set the new target id */
...@@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, ...@@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_reject_fwd_resp_input req = {0}; struct hwrm_reject_fwd_resp_input req = {0};
struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
/* Set the new target id */ /* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid); req.target_id = cpu_to_le16(vf->fw_fid);
...@@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, ...@@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_exec_fwd_resp_input req = {0}; struct hwrm_exec_fwd_resp_input req = {0};
struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
/* Set the new target id */ /* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid); req.target_id = cpu_to_le16(vf->fw_fid);
......
...@@ -11,6 +11,23 @@ ...@@ -11,6 +11,23 @@
#ifndef BNXT_SRIOV_H #ifndef BNXT_SRIOV_H
#define BNXT_SRIOV_H #define BNXT_SRIOV_H
#define BNXT_FWD_RESP_SIZE_ERR(n) \
((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \
sizeof(struct hwrm_fwd_resp_input))
#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \
((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\
offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id))
#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \
((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\
offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id))
#define BNXT_VF_MIN_RSS_CTX 1
#define BNXT_VF_MAX_RSS_CTX 1
#define BNXT_VF_MIN_L2_CTX 1
#define BNXT_VF_MAX_L2_CTX 4
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *); int bnxt_set_vf_mac(struct net_device *, int, u8 *);
int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment