Commit 43e7a0e5 authored by David S. Miller

Merge branch 'bnxt_en-update'

Michael Chan says:

====================
bnxt_en update.

This patchset removes the PCIe histogram and other debug register
data from ethtool -S. The removed data are not counters; they are
very large, constantly fluctuating register values that are not suited
to the decimal counter display of ethtool -S.

The rest of the patches implement counter rollover for all hardware
counters that are narrower than 64 bits.  Because different generations
of hardware have different counter widths, the driver now queries the
width of each counter from firmware and handles rollover for every
non-64-bit counter.
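
The mechanism, roughly: firmware reports a per-counter mask describing
each counter's width, and the driver keeps a 64-bit software copy of
every counter that it advances by the masked delta on each refresh.  A
minimal sketch of that accumulation step (illustrative only; the helper
name and exact structure are assumptions, not the driver's literal
code):

    #include <linux/types.h>

    /*
     * Fold one hardware counter into its 64-bit software shadow.
     * @mask is the counter width reported by firmware, e.g.
     * (1ULL << 48) - 1 for a 48-bit counter, or ~0ULL for a true
     * 64-bit counter.
     */
    static void stats_accumulate_one(u64 *sw, u64 hw, u64 mask)
    {
            if (mask == ~0ULL) {
                    *sw = hw;       /* full width, cannot roll over */
                    return;
            }
            /*
             * *sw stays congruent to the last hardware reading modulo
             * the counter width, so the masked difference is the
             * correct increment even if the counter wrapped once since
             * the previous refresh.
             */
            *sw += (hw - *sw) & mask;
    }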

The last patch adds the PCIe histogram and other PCIe register data back
using the ethtool -d register dump interface.
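
The layout of that dump (see the bnxt_get_regs() / bnxt_get_regs_len()
hunks below) is the BNXT_PXP_REG_LEN (0x3110) bytes of PXP registers,
followed, when firmware supports HWRM_PCIE_QSTATS, by a host-endian
copy of struct pcie_ctx_hw_stats, with regs->version set to 1 in that
case.  A hypothetical userspace consumer of such a dump might locate
the PCIe block like this (sketch only; this helper is not part of the
patchset):

    #include <stddef.h>
    #include <stdint.h>

    #define BNXT_PXP_REG_LEN        0x3110  /* matches bnxt_ethtool.h below */

    /*
     * Return a pointer to the PCIe statistics appended after the PXP
     * register block, or NULL when the dump holds only the registers
     * (version 0, i.e. firmware without PCIE_QSTATS support).
     */
    static const uint64_t *bnxt_regs_pcie_stats(const void *dump, size_t len,
                                                uint32_t version)
    {
            if (version < 1 || len <= BNXT_PXP_REG_LEN)
                    return NULL;
            return (const uint64_t *)((const uint8_t *)dump + BNXT_PXP_REG_LEN);
    }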

v2: Fix bnxt_re RDMA driver compile issue.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 350e7ab9 b5d600b0
@@ -132,7 +132,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
stats->value[BNXT_RE_RX_DROPS] =
le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
le64_to_cpu(bnxt_re_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
This diff is collapsed.
@@ -919,6 +919,14 @@ struct bnxt_sw_stats {
struct bnxt_cmn_sw_stats cmn;
};
struct bnxt_stats_mem {
u64 *sw_stats;
u64 *hw_masks;
void *hw_stats;
dma_addr_t hw_stats_map;
int len;
};
struct bnxt_cp_ring_info {
struct bnxt_napi *bnapi;
u32 cp_raw_cons;
@@ -943,8 +951,7 @@ struct bnxt_cp_ring_info {
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
struct ctx_hw_stats *hw_stats;
dma_addr_t hw_stats_map;
struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
struct bnxt_sw_stats sw_stats;
@@ -1135,6 +1142,50 @@ struct bnxt_ntuple_filter {
#define BNXT_FLTR_UPDATE 1
};
struct hwrm_port_phy_qcfg_output_compat {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 link;
u8 link_signal_mode;
__le16 link_speed;
u8 duplex_cfg;
u8 pause;
__le16 support_speeds;
__le16 force_link_speed;
u8 auto_mode;
u8 auto_pause;
__le16 auto_link_speed;
__le16 auto_link_speed_mask;
u8 wirespeed;
u8 lpbk;
u8 force_pause;
u8 module_status;
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
u8 media_type;
u8 xcvr_pkg_type;
u8 eee_config_phy_addr;
u8 parallel_detect;
__le16 link_partner_adv_speeds;
u8 link_partner_adv_auto_mode;
u8 link_partner_adv_pause;
__le16 adv_eee_link_speed_mask;
__le16 link_partner_adv_eee_link_speed_mask;
__le32 xcvr_identifier_type_tx_lpi_timer;
__le16 fec_cfg;
u8 duplex_state;
u8 option_flags;
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
u8 unused_0[7];
u8 valid;
};
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1253,6 +1304,9 @@ struct bnxt_test_info {
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
#define CHIMP_REG_VIEW_ADDR \
((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
@@ -1566,7 +1620,6 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1719,6 +1772,7 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
#define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1733,17 +1787,9 @@ struct bnxt {
dma_addr_t hwrm_cmd_kong_resp_dma_addr;
struct rtnl_link_stats64 net_stats_prev;
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
struct tx_port_stats_ext *hw_tx_port_stats_ext;
struct pcie_ctx_hw_stats *hw_pcie_stats;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
dma_addr_t hw_tx_port_stats_ext_map;
dma_addr_t hw_pcie_stats_map;
int hw_port_stats_size;
struct bnxt_stats_mem port_stats;
struct bnxt_stats_mem rx_port_stats_ext;
struct bnxt_stats_mem tx_port_stats_ext;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
@@ -1885,12 +1931,27 @@ struct bnxt {
struct device *hwmon_dev;
};
#define BNXT_GET_RING_STATS64(sw, counter) \
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
#define BNXT_GET_RX_PORT_STATS64(sw, counter) \
(*((sw) + offsetof(struct rx_port_stats, counter) / 8))
#define BNXT_GET_TX_PORT_STATS64(sw, counter) \
(*((sw) + offsetof(struct tx_port_stats, counter) / 8))
#define BNXT_PORT_STATS_SIZE \
(sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)
#define BNXT_TX_PORT_STATS_BYTE_OFFSET \
(sizeof(struct rx_port_stats) + 512)
#define BNXT_RX_STATS_OFFSET(counter) \
(offsetof(struct rx_port_stats, counter) / 8)
#define BNXT_TX_STATS_OFFSET(counter) \
((offsetof(struct tx_port_stats, counter) + \
sizeof(struct rx_port_stats) + 512) / 8)
BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8)
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
@@ -1898,9 +1959,6 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
#define BNXT_PCIE_STATS_OFFSET(counter) \
(offsetof(struct pcie_ctx_hw_stats, counter) / 8)
#define BNXT_HW_FEATURE_VLAN_ALL_RX \
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
#define BNXT_HW_FEATURE_VLAN_ALL_TX \
@@ -2062,6 +2120,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
@@ -544,7 +544,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct bnxt *bp = netdev_priv(dev);
__le64 *stats = (__le64 *)bp->hw_rx_port_stats;
__le64 *stats = bp->port_stats.hw_stats;
struct ieee_pfc *my_pfc = bp->ieee_pfc;
long rx_off, tx_off;
int i, rc;
@@ -142,7 +142,7 @@ static const char * const bnxt_ring_rx_stats_str[] = {
"rx_mcast_packets",
"rx_bcast_packets",
"rx_discards",
"rx_drops",
"rx_errors",
"rx_ucast_bytes",
"rx_mcast_bytes",
"rx_bcast_bytes",
@@ -152,8 +152,8 @@ static const char * const bnxt_ring_tx_stats_str[] = {
"tx_ucast_packets",
"tx_mcast_packets",
"tx_bcast_packets",
"tx_errors",
"tx_discards",
"tx_drops",
"tx_ucast_bytes",
"tx_mcast_bytes",
"tx_bcast_bytes",
@@ -293,9 +293,6 @@ static const char * const bnxt_cmn_sw_stats_str[] = {
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
#define BNXT_PCIE_STATS_ENTRY(counter) \
{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -454,24 +451,6 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
} bnxt_pcie_stats_arr[] = {
BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
};
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
@@ -479,7 +458,6 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
@@ -526,9 +504,6 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI;
}
if (bp->flags & BNXT_FLAG_PCIE_STATS)
num_stats += BNXT_NUM_PCIE_STATS;
return num_stats;
}
@@ -584,19 +559,19 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
__le64 *hw_stats = (__le64 *)cpr->hw_stats;
u64 *sw_stats = cpr->stats.sw_stats;
u64 *sw;
int k;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j] = sw_stats[k];
}
if (is_tx_ring(bp, i)) {
k = NUM_RING_RX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j] = sw_stats[k];
}
if (!tpa_stats || !is_rx_ring(bp, i))
goto skip_tpa_ring_stats;
@@ -604,7 +579,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
tpa_stats; j++, k++)
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j] = sw_stats[k];
skip_tpa_ring_stats:
sw = (u64 *)&cpr->sw_stats.rx;
@@ -618,9 +593,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = sw[k];
bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
@@ -628,58 +603,48 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS) {
__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
u64 *port_stats = bp->port_stats.sw_stats;
for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
buf[j] = le64_to_cpu(*(port_stats +
bnxt_port_stats_arr[i].offset));
}
for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
__le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
__le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
buf[j] = le64_to_cpu(*(rx_port_stats_ext +
bnxt_port_stats_ext_arr[i].offset));
buf[j] = *(rx_port_stats_ext +
bnxt_port_stats_ext_arr[i].offset);
}
for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
buf[j] = le64_to_cpu(*(tx_port_stats_ext +
bnxt_tx_port_stats_ext_arr[i].offset));
buf[j] = *(tx_port_stats_ext +
bnxt_tx_port_stats_ext_arr[i].offset);
}
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_bytes_pri_arr[i].base_off +
bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
buf[j] = *(tx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_pkts_pri_arr[i].base_off +
bp->pri2cos_idx[i];
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
}
}
buf[j] = *(tx_port_stats_ext + n);
}
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
buf[j] = le64_to_cpu(*(pcie_stats +
bnxt_pcie_stats_arr[i].offset));
}
}
}
@@ -782,12 +747,6 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
}
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
strcpy(buf, bnxt_pcie_stats_arr[i].string);
buf += ETH_GSTRING_LEN;
}
}
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -1365,6 +1324,59 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->regdump_len = 0;
}
static int bnxt_get_regs_len(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
int reg_len;
reg_len = BNXT_PXP_REG_LEN;
if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
reg_len += sizeof(struct pcie_ctx_hw_stats);
return reg_len;
}
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
struct pcie_ctx_hw_stats *hw_pcie_stats;
struct hwrm_pcie_qstats_input req = {0};
struct bnxt *bp = netdev_priv(dev);
dma_addr_t hw_pcie_stats_addr;
int rc;
regs->version = 0;
bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
sizeof(*hw_pcie_stats),
&hw_pcie_stats_addr, GFP_KERNEL);
if (!hw_pcie_stats)
return;
regs->version = 1;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
__le64 *src = (__le64 *)hw_pcie_stats;
u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
int i;
for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
dst[i] = le64_to_cpu(src[i]);
}
mutex_unlock(&bp->hwrm_cmd_lock);
dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
hw_pcie_stats_addr);
}
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3640,6 +3652,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
.get_regs_len = bnxt_get_regs_len,
.get_regs = bnxt_get_regs,
.get_wol = bnxt_get_wol,
.set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
@@ -84,6 +84,8 @@ struct hwrm_dbg_cmn_output {
ETH_RESET_PHY | ETH_RESET_RAM) \
<< ETH_RESET_SHARED_SHIFT)
#define BNXT_PXP_REG_LEN 0x3110
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
@@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =