Commit a2220b54 authored by Paolo Abeni

Merge branch 'cn10kb-mac-block-support'

Hariprasad Kelam says:

====================
CN10KB MAC block support

OcteonTx2's next-generation platform, the CN10KB, has an RPM_USX MAC
whose serdes differs from that of the RPM MAC. Although the underlying
HW is different, the CSR interface has been designed largely in line
with the RPM MAC, with a few exceptions. We therefore reuse the same
CGX driver for the RPM_USX MAC as well, with a separate set of APIs for
RPM_USX wherever necessary (a dispatch sketch follows this paragraph).
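
Below is a minimal, standalone C sketch of that driver-sharing idea: a
single probe path picks a mac_ops table based on the PCI device ID, so
RPM_USX only overrides what it must. It is a user-space model for
illustration, not the kernel code; only the two RPM device IDs are
taken from the patch, everything else is simplified.

    /* Simplified model of the mac_ops selection done at probe time. */
    #include <stdio.h>

    #define PCI_DEVID_CN10K_RPM   0xA060   /* CN10K RPM, from the patch */
    #define PCI_DEVID_CN10KB_RPM  0xA09F   /* CN10KB RPM_USX, from the patch */

    struct mac_ops {
            const char *name;
            int dmac_filter_count;
    };

    static struct mac_ops cgx_mac_ops  = { "cgx", 32 };
    static struct mac_ops rpm_mac_ops  = { "rpm", 32 };
    static struct mac_ops rpm2_mac_ops = { "rpm", 64 };

    /* CN10KB uses the RPM2 (RPM_USX) ops, CN10K the legacy RPM ops,
     * anything else falls back to the CGX ops.
     */
    static struct mac_ops *pick_mac_ops(unsigned int devid)
    {
            if (devid == PCI_DEVID_CN10KB_RPM)
                    return &rpm2_mac_ops;
            if (devid == PCI_DEVID_CN10K_RPM)
                    return &rpm_mac_ops;
            return &cgx_mac_ops;
    }

    int main(void)
    {
            struct mac_ops *ops = pick_mac_ops(PCI_DEVID_CN10KB_RPM);

            printf("%s: %d DMAC filters\n", ops->name, ops->dmac_filter_count);
            return 0;
    }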

The RPM and RPM_USX blocks support a different number of LMACs:
RPM_USX supports 8 LMACs per MAC block whereas legacy RPM supports only
4 LMACs per MAC. Along with this, RPM_USX supports double the number of
DMAC filters and twice the FIFO size (see the sketch after this
paragraph).
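
As a rough illustration of how those per-MAC limits feed the driver,
the sketch below models reading the LMAC count and RX FIFO size from a
CGX_CONST-style register and splitting the DMAC filter pool across the
LMACs. The bit positions and the 64-filter figure come from the patch;
the register value itself is made up for the example.

    #include <stdio.h>
    #include <stdint.h>

    /* Extract bits [hi:lo] of a 64-bit CSR value. */
    static unsigned int field_get(uint64_t val, int hi, int lo)
    {
            return (val >> lo) & ((1ULL << (hi - lo + 1)) - 1);
    }

    int main(void)
    {
            /* hypothetical CGX_CONST read: 8 LMACs, 256 KB RX FIFO */
            uint64_t cgx_const = (0x40000ULL << 32) | (8ULL << 24);
            unsigned int max_lmacs = field_get(cgx_const, 31, 24);
            unsigned int fifo_len  = field_get(cgx_const, 55, 32);
            unsigned int dmac_filter_count = 64; /* RPM_USX value in the patch */

            printf("lmacs=%u fifo=%u bytes, %u DMAC filters per LMAC\n",
                   max_lmacs, fifo_len, dmac_filter_count / max_lmacs);
            return 0;
    }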

This patchset adds initial support for CN10KB's RPM_USX MAC, i.e.
registering the driver and defining MAC operations (mac_ops). With
these changes the PF and VF netdev packet paths work, and the PF and VF
netdev drivers can configure MAC features such as pause frames, PFC and
loopback (a small dispatch sketch follows).
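
The sketch below models the mac_ops dispatch that makes this possible:
the common code calls generic pause/loopback hooks, and the table
chosen at probe time (cgx, rpm or rpm2/RPM_USX) supplies the
implementation. The callback bodies here are placeholders, not the
driver's real register programming; only the hook names follow the
patch.

    #include <stdio.h>
    #include <stdbool.h>

    struct mac_ops {
            int (*mac_enadis_pause_frm)(void *macd, int lmac_id,
                                        unsigned char tx_pause,
                                        unsigned char rx_pause);
            int (*mac_lmac_intl_lbk)(void *macd, int lmac_id, bool enable);
    };

    static int rpm2_pause(void *macd, int lmac_id,
                          unsigned char tx, unsigned char rx)
    {
            printf("rpm_usx lmac%d: tx_pause=%u rx_pause=%u\n", lmac_id, tx, rx);
            return 0;
    }

    static int rpm2_lbk(void *macd, int lmac_id, bool enable)
    {
            printf("rpm_usx lmac%d: loopback %s\n", lmac_id,
                   enable ? "on" : "off");
            return 0;
    }

    static struct mac_ops rpm2_ops = {
            .mac_enadis_pause_frm = rpm2_pause,
            .mac_lmac_intl_lbk    = rpm2_lbk,
    };

    int main(void)
    {
            struct mac_ops *ops = &rpm2_ops;        /* chosen at probe time */

            ops->mac_enadis_pause_frm(NULL, 0, 1, 1);
            ops->mac_lmac_intl_lbk(NULL, 0, true);
            return 0;
    }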

It also implements FEC stats for the CN10K MAC block (RPM) and the
CN10KB MAC block (RPM_USX), and extends ethtool support in the PF and
VF drivers by defining the get_fec_stats API to display FEC stats (see
the sketch below).
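
A minimal model of the new FEC stats path is sketched below: the
ethtool get_fec_stats callback ultimately lands in
mac_ops->get_fec_stats, which fills a cgx_fec_stats_rsp with
corrected/uncorrectable block counts. The structure and hook names
follow the patch; the counter values and the flow are simplified
stand-ins for the real mailbox/CSR handling.

    #include <stdio.h>
    #include <stdint.h>

    struct cgx_fec_stats_rsp {
            uint64_t fec_corr_blks;
            uint64_t fec_uncorr_blks;
    };

    struct mac_ops {
            int (*get_fec_stats)(void *macd, int lmac_id,
                                 struct cgx_fec_stats_rsp *rsp);
    };

    static int rpm_get_fec_stats(void *macd, int lmac_id,
                                 struct cgx_fec_stats_rsp *rsp)
    {
            /* a real implementation reads the FC-FEC/RS-FEC counters here */
            rsp->fec_corr_blks = 12;
            rsp->fec_uncorr_blks = 1;
            return 0;
    }

    static struct mac_ops rpm_ops = { .get_fec_stats = rpm_get_fec_stats };

    int main(void)
    {
            struct cgx_fec_stats_rsp rsp = { 0 };

            rpm_ops.get_fec_stats(NULL, 0, &rsp);
            printf("corrected=%llu uncorrectable=%llu\n",
                   (unsigned long long)rsp.fec_corr_blks,
                   (unsigned long long)rsp.fec_uncorr_blks);
            return 0;
    }
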
====================

Link: https://lore.kernel.org/r/20221205070521.21860-1-hkelam@marvell.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents f82389ee 84ad3642
......@@ -64,6 +64,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
{ 0, } /* end of table */
};
......@@ -73,12 +74,13 @@ static bool is_dev_rpm(void *cgxd)
{
struct cgx *cgx = cgxd;
return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
(cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}
bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
return false;
return test_bit(lmac_id, &cgx->lmac_bmap);
}
......@@ -90,7 +92,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
int tmp, id = 0;
for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
if (tmp == lmac_id)
break;
id++;
......@@ -121,7 +123,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
return NULL;
return cgx->lmac_idmap[lmac_id];
......@@ -485,7 +487,7 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
return 0;
}
......@@ -740,6 +742,10 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
return 0;
fec_stats_count =
cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
......@@ -1224,7 +1230,7 @@ static inline void link_status_user_format(u64 lstat,
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
......@@ -1395,7 +1401,7 @@ int cgx_get_fwdata_base(u64 *base)
if (!cgx)
return -ENXIO;
first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
......@@ -1484,7 +1490,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
......@@ -1522,7 +1528,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
int i, err;
/* Do Link up for all the enabled lmacs */
for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
......@@ -1542,14 +1548,6 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
static void cgx_lmac_get_fifolen(struct cgx *cgx)
{
u64 cfg;
cfg = cgx_read(cgx, 0, CGX_CONST);
cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
}
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
int cnt, bool req_free)
{
......@@ -1604,17 +1602,20 @@ static int cgx_lmac_init(struct cgx *cgx)
u64 lmac_list;
int i, err;
cgx_lmac_get_fifolen(cgx);
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
*/
if (cgx->mac_ops->non_contiguous_serdes_lane)
lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
if (cgx->mac_ops->non_contiguous_serdes_lane) {
if (is_dev_rpm2(cgx))
lmac_list =
cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
else
lmac_list =
cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
}
if (cgx->lmac_count > MAX_LMAC_PER_CGX)
cgx->lmac_count = MAX_LMAC_PER_CGX;
if (cgx->lmac_count > cgx->max_lmac_per_mac)
cgx->lmac_count = cgx->max_lmac_per_mac;
for (i = 0; i < cgx->lmac_count; i++) {
lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
......@@ -1635,7 +1636,9 @@ static int cgx_lmac_init(struct cgx *cgx)
lmac->cgx = cgx;
lmac->mac_to_index_bmap.max =
MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
cgx->mac_ops->dmac_filter_count /
cgx->lmac_count;
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
goto err_name_free;
......@@ -1692,7 +1695,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
......@@ -1708,6 +1711,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
u64 cfg;
cfg = cgx_read(cgx, 0, CGX_CONST);
cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
if (is_dev_rpm(cgx))
cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
......@@ -1716,6 +1725,15 @@ static void cgx_populate_features(struct cgx *cgx)
RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
{
if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
is_dev_rpm2(cgx))
return 0x80;
else
return 0x60;
}
static struct mac_ops cgx_mac_ops = {
.name = "cgx",
.csr_offset = 0,
......@@ -1728,12 +1746,14 @@ static struct mac_ops cgx_mac_ops = {
.non_contiguous_serdes_lane = false,
.rx_stats_cnt = 9,
.tx_stats_cnt = 18,
.dmac_filter_count = 32,
.get_nr_lmacs = cgx_get_nr_lmacs,
.get_lmac_type = cgx_get_lmac_type,
.lmac_fifo_len = cgx_get_lmac_fifo_len,
.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
.mac_get_rx_stats = cgx_get_rx_stats,
.mac_get_tx_stats = cgx_get_tx_stats,
.get_fec_stats = cgx_get_fec_stats,
.mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
......@@ -1759,11 +1779,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, cgx);
/* Use mac_ops to get MAC specific features */
if (pdev->device == PCI_DEVID_CN10K_RPM)
cgx->mac_ops = rpm_get_mac_ops();
if (is_dev_rpm(cgx))
cgx->mac_ops = rpm_get_mac_ops(cgx);
else
cgx->mac_ops = &cgx_mac_ops;
cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
......
......@@ -18,11 +18,7 @@
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
#define CGX_ID_MASK 0xF
/* Registers */
#define CGXX_CMRX_CFG 0x00
......@@ -56,7 +52,8 @@
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(55, 32)
#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24)
#define CGXX_SPUX_CONTROL1 0x10000
#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
......
......@@ -75,6 +75,11 @@ struct mac_ops {
/* RPM & CGX differs in number of Receive/transmit stats */
u8 rx_stats_cnt;
u8 tx_stats_cnt;
/* Unlike CN10K which shares same CSR offset with CGX
* CNF10KB has different csr offset
*/
u64 rxid_map_offset;
u8 dmac_filter_count;
/* Incase of RPM get number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST]
* number of setbits in lmac_exist tells number of lmacs
*/
......@@ -121,6 +126,9 @@ struct mac_ops {
int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
/* FEC stats */
int (*get_fec_stats)(void *cgxd, int lmac_id,
struct cgx_fec_stats_rsp *rsp);
};
struct cgx {
......@@ -128,7 +136,10 @@ struct cgx {
struct pci_dev *pdev;
u8 cgx_id;
u8 lmac_count;
struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
/* number of LMACs per MAC could be 4 or 8 */
u8 max_lmac_per_mac;
#define MAX_LMAC_COUNT 8
struct lmac *lmac_idmap[MAX_LMAC_COUNT];
struct work_struct cgx_cmd_work;
struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;
......@@ -150,6 +161,6 @@ struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
bool is_lmac_valid(struct cgx *cgx, int lmac_id);
struct mac_ops *rpm_get_mac_ops(void);
struct mac_ops *rpm_get_mac_ops(struct cgx *cgx);
#endif /* LMAC_COMMON_H */
......@@ -8,7 +8,7 @@
#include "cgx.h"
#include "lmac_common.h"
static struct mac_ops rpm_mac_ops = {
.name = "rpm",
.csr_offset = 0x4e00,
.lmac_offset = 20,
......@@ -20,12 +20,14 @@ static struct mac_ops rpm_mac_ops = {
.non_contiguous_serdes_lane = true,
.rx_stats_cnt = 43,
.tx_stats_cnt = 34,
.dmac_filter_count = 32,
.get_nr_lmacs = rpm_get_nr_lmacs,
.get_lmac_type = rpm_get_lmac_type,
.lmac_fifo_len = rpm_get_lmac_fifo_len,
.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
.mac_get_rx_stats = rpm_get_rx_stats,
.mac_get_tx_stats = rpm_get_tx_stats,
.get_fec_stats = rpm_get_fec_stats,
.mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
......@@ -37,9 +39,50 @@ static struct mac_ops rpm_mac_ops = {
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
struct mac_ops *rpm_get_mac_ops(void)
static struct mac_ops rpm2_mac_ops = {
.name = "rpm",
.csr_offset = RPM2_CSR_OFFSET,
.lmac_offset = 20,
.int_register = RPM2_CMRX_SW_INT,
.int_set_reg = RPM2_CMRX_SW_INT_ENA_W1S,
.irq_offset = 1,
.int_ena_bit = BIT_ULL(0),
.lmac_fwi = RPM_LMAC_FWI,
.non_contiguous_serdes_lane = true,
.rx_stats_cnt = 43,
.tx_stats_cnt = 34,
.dmac_filter_count = 64,
.get_nr_lmacs = rpm2_get_nr_lmacs,
.get_lmac_type = rpm_get_lmac_type,
.lmac_fifo_len = rpm2_get_lmac_fifo_len,
.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
.mac_get_rx_stats = rpm_get_rx_stats,
.mac_get_tx_stats = rpm_get_tx_stats,
.get_fec_stats = rpm_get_fec_stats,
.mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
.mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
.mac_tx_enable = rpm_lmac_tx_enable,
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
bool is_dev_rpm2(void *rpmd)
{
rpm_t *rpm = rpmd;
return (rpm->pdev->device == PCI_DEVID_CN10KB_RPM);
}
struct mac_ops *rpm_get_mac_ops(rpm_t *rpm)
{
return &rpm_mac_ops;
if (is_dev_rpm2(rpm))
return &rpm2_mac_ops;
else
return &rpm_mac_ops;
}
static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
......@@ -52,6 +95,16 @@ static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
return cgx_read(rpm, lmac, offset);
}
/* Read HW major version to determine RPM
* MAC type 100/USX
*/
static bool is_mac_rpmusx(void *rpmd)
{
rpm_t *rpm = rpmd;
return rpm_read(rpm, 0, RPMX_CONST1) & 0x700ULL;
}
int rpm_get_nr_lmacs(void *rpmd)
{
rpm_t *rpm = rpmd;
......@@ -59,6 +112,13 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
int rpm2_get_nr_lmacs(void *rpmd)
{
rpm_t *rpm = rpmd;
return hweight8(rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL);
}
int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
......@@ -222,6 +282,46 @@ static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
}
}
static void rpm2_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
{
u64 cfg;
cfg = rpm_read(rpm, lmac_id, RPM2_CMR_RX_OVR_BP);
if (tx_pause) {
/* Configure CL0 Pause Quanta & threshold
* for 802.3X frames
*/
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
cfg &= ~RPM2_CMR_RX_OVR_BP_EN;
} else {
/* Disable all Pause Quanta & threshold values */
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
cfg |= RPM2_CMR_RX_OVR_BP_EN;
cfg &= ~RPM2_CMR_RX_OVR_BP_BP;
}
rpm_write(rpm, lmac_id, RPM2_CMR_RX_OVR_BP, cfg);
}
static void rpm_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
{
u64 cfg;
cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
if (tx_pause) {
/* Configure CL0 Pause Quanta & threshold for
* 802.3X frames
*/
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
} else {
/* Disable all Pause Quanta & threshold values */
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
}
rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
}
int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause)
{
......@@ -243,18 +343,11 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
if (tx_pause) {
/* Configure CL0 Pause Quanta & threshold for 802.3X frames */
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
} else {
/* Disable all Pause Quanta & threshold values */
rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
}
rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
if (is_dev_rpm2(rpm))
rpm2_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
else
rpm_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
return 0;
}
......@@ -278,13 +371,16 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
/* Enable channel mask for all LMACS */
if (is_dev_rpm2(rpm))
rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
else
rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
/* Disable all PFC classes */
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
/* Enable channel mask for all LMACS */
rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
......@@ -292,7 +388,7 @@ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
rpm_t *rpm = rpmd;
u64 val_lo, val_hi;
if (!rpm || lmac_id >= rpm->lmac_count)
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
mutex_lock(&rpm->lock);
......@@ -320,7 +416,7 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
rpm_t *rpm = rpmd;
u64 val_lo, val_hi;
if (!rpm || lmac_id >= rpm->lmac_count)
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
mutex_lock(&rpm->lock);
......@@ -380,13 +476,71 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
return 0;
}
static int rpmusx_lmac_internal_loopback(rpm_t *rpm, int lmac_id, bool enable)
{
u64 cfg;
cfg = rpm_read(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1);
if (enable)
cfg |= RPM2_USX_PCS_LBK;
else
cfg &= ~RPM2_USX_PCS_LBK;
rpm_write(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1, cfg);
return 0;
}
u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
{
u64 hi_perf_lmac, lmac_info;
rpm_t *rpm = rpmd;
u8 num_lmacs;
u32 fifo_len;
lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
/* LMACs are divided into two groups and each group
* gets half of the FIFO
* Group0 lmac_id range {0..3}
* Group1 lmac_id range {4..7}
*/
fifo_len = rpm->mac_ops->fifo_len / 2;
if (lmac_id < 4) {
num_lmacs = hweight8(lmac_info & 0xF);
hi_perf_lmac = (lmac_info >> 8) & 0x3ULL;
} else {
num_lmacs = hweight8(lmac_info & 0xF0);
hi_perf_lmac = (lmac_info >> 10) & 0x3ULL;
hi_perf_lmac += 4;
}
switch (num_lmacs) {
case 1:
return fifo_len;
case 2:
return fifo_len / 2;
case 3:
/* LMAC marked as hi_perf gets half of the FIFO
* and rest 1/4th
*/
if (lmac_id == hi_perf_lmac)
return fifo_len / 2;
return fifo_len / 4;
case 4:
default:
return fifo_len / 4;
}
return 0;
}
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
u8 lmac_type;
u64 cfg;
if (!rpm || lmac_id >= rpm->lmac_count)
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
......@@ -395,6 +549,9 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
return 0;
}
if (is_dev_rpm2(rpm) && is_mac_rpmusx(rpm))
return rpmusx_lmac_internal_loopback(rpm, lmac_id, enable);
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
if (enable)
......@@ -439,8 +596,8 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
{
u64 cfg, class_en, pfc_class_mask_cfg;
rpm_t *rpm = rpmd;
u64 cfg, class_en;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
......@@ -476,7 +633,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, class_en);
pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
RPMX_CMRX_PRT_CBFC_CTL;
rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
return 0;
}
......@@ -497,3 +657,59 @@ int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_paus
return 0;
}
int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
u64 val_lo, val_hi;
rpm_t *rpm = rpmd;
u64 cfg;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
return 0;
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
rsp->fec_corr_blks = (val_hi << 16 | val_lo);
val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
/* 50G uses 2 Physical serdes lines */
if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
LMAC_MODE_50G_R) {
val_lo = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_VL1_CCW_LO);
val_hi = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_CW_HI);
rsp->fec_corr_blks += (val_hi << 16 | val_lo);
val_lo = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_VL1_NCCW_LO);
val_hi = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_CW_HI);
rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
}
} else {
/* enable RS-FEC capture */
cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
rsp->fec_corr_blks = (val_hi << 32 | val_lo);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
}
return 0;
}
......@@ -12,17 +12,19 @@
/* PCI device IDs */
#define PCI_DEVID_CN10K_RPM 0xA060
#define PCI_SUBSYS_DEVID_CNF10KB_RPM 0xBC00
#define PCI_DEVID_CN10KB_RPM 0xA09F
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_RX_ID_MAP 0x80
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
#define RPMX_CMRX_LINK_CFG 0x1070
#define RPMX_MTI_PCS100X_CONTROL1 0x20000
#define RPMX_MTI_LPCSX_CONTROL1 0x30000
#define RPMX_MTI_PCS_LBK BIT_ULL(14)
#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
......@@ -76,11 +78,40 @@
#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
#define RPMX_TS_BINARY_MODE BIT_ULL(11)
#define RPMX_CONST1 0x2008
/* FEC stats */
#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620
#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628
#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630
#define RPMX_MTI_FCFECX_CW_HI 0x38638
/* CN10KB CSR Declaration */
#define RPM2_CMRX_SW_INT 0x1b0
#define RPM2_CMRX_SW_INT_ENA_W1S 0x1b8
#define RPM2_CMR_CHAN_MSK_OR 0x3120
#define RPM2_CMR_RX_OVR_BP_EN BIT_ULL(2)
#define RPM2_CMR_RX_OVR_BP_BP BIT_ULL(1)
#define RPM2_CMR_RX_OVR_BP 0x3130
#define RPM2_CSR_OFFSET 0x3e00
#define RPM2_CMRX_PRT_CBFC_CTL 0x6510
#define RPM2_CMRX_RX_LMACS 0x100
#define RPM2_CMRX_RX_LOGL_XON 0x3100
#define RPM2_CMRX_RX_STAT2 0x3010
#define RPM2_USX_PCSX_CONTROL1 0x80000
#define RPM2_USX_PCS_LBK BIT_ULL(14)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id);
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
......@@ -97,4 +128,7 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
u8 *rx_pause);
int rpm2_get_nr_lmacs(void *rpmd);
bool is_dev_rpm2(void *rpmd);
int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
#endif /* RPM_H */
......@@ -410,9 +410,15 @@ struct rvu_fwdata {
u32 ptp_ext_tstamp;
#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 5
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
#define CGX_LMACS_USX 8
union {
struct cgx_lmac_fwdata_s
cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
struct cgx_lmac_fwdata_s
cgx_fw_data_usx[CGX_MAX][CGX_LMACS_USX];
};
/* Do not add new fields below this line */
};
......@@ -478,7 +484,7 @@ struct rvu {
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
*/
unsigned long pf_notify_bmap; /* Flags for PF notification */
......
......@@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
return (cgx_features_get(cgxd) & feature);
}
#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
......@@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
if (!pfmap)
return -ENODEV;
else
return find_first_bit(&pfmap, 16);
return find_first_bit(&pfmap,
rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
......@@ -129,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!cgx_cnt_max)
return 0;
if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
......@@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
GFP_KERNEL);
rvu->cgxlmac2pf_map =
devm_kzalloc(rvu->dev,
cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
......@@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!rvu_cgx_pdata(cgx, rvu))
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
......@@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
pfid = find_first_bit(&pfmap, 16);
pfid = find_first_bit(&pfmap,
rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
......@@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
......@@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
cgx_lmac_evh_unregister(cgxd, lmac);
}
......@@ -468,6 +472,7 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
int pf = rvu_get_pf(pcifunc);
int i = 0, lmac_count = 0;
struct mac_ops *mac_ops;
u8 max_dmac_filters;
u8 cgx_id, lmac_id;
void *cgx_dev;
......@@ -483,7 +488,12 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_dev = cgx_get_pdata(cgx_id);
lmac_count = cgx_get_lmac_cnt(cgx_dev);
max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
mac_ops = get_mac_ops(cgx_dev);
if (!mac_ops)
return;
max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;
for (i = 0; i < max_dmac_filters; i++)
cgx_lmac_addr_del(cgx_id, lmac_id, i);
......@@ -569,6 +579,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct cgx_fec_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
......@@ -577,7 +588,8 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
return cgx_get_fec_stats(cgxd, lmac, rsp);
mac_ops = get_mac_ops(cgxd);
return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
......@@ -1110,8 +1122,15 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
sizeof(struct cgx_lmac_fwdata_s));
if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
memcpy(&rsp->fwdata,
&rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
sizeof(struct cgx_lmac_fwdata_s));
else
memcpy(&rsp->fwdata,
&rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
sizeof(struct cgx_lmac_fwdata_s));
return 0;
}
......
......@@ -2613,7 +2613,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
......
......@@ -3197,8 +3197,12 @@ static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
/* RPM supports FIFO len 128 KB */
if (rvu_cgx_get_fifolen(rvu) == 0x20000)
int fifo_size = rvu_cgx_get_fifolen(rvu);
/* RPM supports FIFO len 128 KB and RPM2 supports double the
* FIFO len to accommodate 8 LMACS
*/
if (fifo_size == 0x20000 || fifo_size == 0x40000)
*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
else
*max_mtu = NIC_HW_MAX_FRS;
......@@ -4109,7 +4113,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
/* Get LMAC id's from bitmap */
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
if (!lmac_fifo_len) {
dev_err(rvu->dev,
......
......@@ -1956,7 +1956,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
/* Install SDP drop rule */
drop_mcam_idx = &table->num_drop_rules;
max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
PF_CGXMAP_BASE;
for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
if (rvu->pf2cgxlmac_map[i] == 0xFF)
continue;
......
......@@ -1268,6 +1268,39 @@ static int otx2_set_link_ksettings(struct net_device *netdev,
return err;
}
static void otx2_get_fec_stats(struct net_device *netdev,
struct ethtool_fec_stats *fec_stats)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_fw_data *rsp;
otx2_update_lmac_fec_stats(pfvf);
/* Report MAC FEC stats */
fec_stats->corrected_blocks.total = pfvf->hw.cgx_fec_corr_blks;
fec_stats->uncorrectable_blocks.total = pfvf->hw.cgx_fec_uncorr_blks;
rsp = otx2_get_fwdata(pfvf);
if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
!otx2_get_phy_fec_stats(pfvf)) {
/* Fetch fwdata again because it's been recently populated with
* latest PHY FEC stats.
*/
rsp = otx2_get_fwdata(pfvf);
if (!IS_ERR(rsp)) {
struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
if (pfvf->linfo.fec == OTX2_FEC_BASER) {
fec_stats->corrected_blocks.total = p->brfec_corr_blks;
fec_stats->uncorrectable_blocks.total = p->brfec_uncorr_blks;
} else {
fec_stats->corrected_blocks.total = p->rsfec_corr_cws;
fec_stats->uncorrectable_blocks.total = p->rsfec_uncorr_cws;
}
}
}
}
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
......@@ -1298,6 +1331,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_ts_info = otx2_get_ts_info,
.get_fec_stats = otx2_get_fec_stats,
.get_fecparam = otx2_get_fecparam,
.set_fecparam = otx2_set_fecparam,
.get_link_ksettings = otx2_get_link_ksettings,
......