Commit 3ccec4bd authored by David S. Miller

Merge branch 'mlx5-next'

Saeed Mahameed says:

====================
Mellanox 100G extending mlx5 ethtool support

Changes from V0:
	- Dropped: net/mlx5e: Disable link up on INIT HCA command
	  At Ido's and Or's request, this patch will be submitted to net instead, since it is also needed for -stable.
	- Rebased to: 11afbff8 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next")

This series centers on extending and improving ethtool support in the mlx5 Ethernet driver.
We refactored ethtool statistics reporting to make it more scalable and robust: each reported
ethtool counter now belongs to a group and has its own descriptor within that group, and the
descriptor holds the counter's name and its offset within that group's memory block.
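
To make the descriptor idea concrete, here is a minimal userspace sketch of the pattern.
All names in it (counter_desc, EXAMPLE_DECLARE_STAT, sw_group_stats, and so on) are
illustrative assumptions, not the driver's identifiers; the real tables live in the
en_stats.h header this series introduces.

/* Illustrative sketch of the group/descriptor pattern described above.
 * Every name here is hypothetical; the driver's real tables live in en_stats.h. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXAMPLE_GSTRING_LEN 32			/* ethtool strings are fixed-size */

struct counter_desc {
	char name[EXAMPLE_GSTRING_LEN];		/* string reported by "ethtool -S" */
	size_t offset;				/* counter offset inside the group's block */
};

struct sw_group_stats {				/* one counter group's memory block */
	uint64_t rx_packets;
	uint64_t rx_bytes;
	uint64_t tx_queue_dropped;
};

#define EXAMPLE_DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc sw_group_desc[] = {
	EXAMPLE_DECLARE_STAT(struct sw_group_stats, rx_packets),
	EXAMPLE_DECLARE_STAT(struct sw_group_stats, rx_bytes),
	EXAMPLE_DECLARE_STAT(struct sw_group_stats, tx_queue_dropped),
};

/* Reading any counter in the group is a name-agnostic memory access. */
static uint64_t read_stat(const void *group, const struct counter_desc *d)
{
	uint64_t v;

	memcpy(&v, (const char *)group + d->offset, sizeof(v));
	return v;
}

int main(void)
{
	struct sw_group_stats s = { .rx_packets = 7, .rx_bytes = 900, .tx_queue_dropped = 1 };
	size_t i;

	for (i = 0; i < sizeof(sw_group_desc) / sizeof(sw_group_desc[0]); i++)
		printf("%s: %llu\n", sw_group_desc[i].name,
		       (unsigned long long)read_stat(&s, &sw_group_desc[i]));
	return 0;
}

With such descriptors, the strings and values reported to ethtool come from walking one
table, instead of keeping a hard-coded string array and a counter struct in sync by hand.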

Added new counters:
	- More error and drop counters reported via the ifconfig/ip tools.
	- Per-priority pause and traffic counters in ethtool.
	- Link-down events counter in ethtool.

Set-features handling was also refactored to be more resilient and generic: setting more
than one feature no longer stops at the first failure; instead, the remaining features are
still attempted. The handling is now generic so that supporting an additional feature only
requires introducing a handler function for the new netdev feature and letting the generic
handler do the job, as sketched below.
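
A minimal sketch of that generic flow, assuming hypothetical names (example_handle_feature
and the handler typedef are illustrative, not the driver's actual identifiers):

/* Sketch of the generic set_features handling described above: each feature bit
 * has its own handler; a handler failure reverts only that bit in the returned
 * mask while the remaining features are still attempted. Names are hypothetical. */
#include <linux/netdevice.h>

typedef int (*example_feature_handler)(struct net_device *netdev, bool enable);

/* The caller starts with *features == wanted and calls this once per feature. */
static int example_handle_feature(struct net_device *netdev,
				  netdev_features_t *features,
				  netdev_features_t wanted,
				  netdev_features_t feature,
				  example_feature_handler handler)
{
	netdev_features_t changes = wanted ^ netdev->features;
	bool enable = !!(wanted & feature);
	int err;

	if (!(changes & feature))	/* this feature bit did not change */
		return 0;

	err = handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		*features ^= feature;	/* revert only this bit, keep going */
	}

	return err;
}

The caller would invoke this once per supported feature; bits whose handlers fail are
reverted in the returned mask while the rest still get applied.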

New netdev features and ethtool support:
	- Netdev feature RXALL: set the FCS check offload on/off.
	- Netdev feature HW_VLAN_CTAG_RX: set the RX VLAN-stripping offload on/off.
	- Ethtool interface identify (port LED beacon).
	- Ethtool dump of the module EEPROM (see the sketch after this list).
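
For the module EEPROM dump, the series exports mlx5_query_module_eeprom(), which reads a
bounded chunk per call and returns the number of bytes read. A rough sketch of how an
ethtool .get_module_eeprom callback could loop over it follows; the callback name and loop
details here are illustrative assumptions, not the driver's actual code.

/* Sketch only: an ethtool .get_module_eeprom callback built on top of
 * mlx5_query_module_eeprom(), which reads at most a small chunk per call
 * and returns the number of bytes read. Names here are illustrative. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
/* "en.h" (driver-internal) would provide struct mlx5e_priv and its mdev field. */

static int example_get_module_eeprom(struct net_device *netdev,
				     struct ethtool_eeprom *ee, u8 *data)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int offset = ee->offset;
	int bytes_read = 0;
	int size_read;

	if (!ee->len)
		return -EINVAL;

	while (bytes_read < ee->len) {
		size_read = mlx5_query_module_eeprom(priv->mdev, offset,
						     ee->len - bytes_read,
						     data + bytes_read);
		if (size_read < 0)
			return size_read;
		if (!size_read)
			break;		/* defensive: avoid spinning */

		bytes_read += size_read;
		offset += size_read;
	}

	return 0;
}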
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4e095a9a 1b223dd3
@@ -46,6 +46,7 @@
 #include <linux/rhashtable.h>
 #include "wq.h"
 #include "mlx5_core.h"
+#include "en_stats.h"
 #define MLX5E_MAX_NUM_TC 8
@@ -148,245 +149,6 @@ struct mlx5e_umr_wqe {
 #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
 #endif
-static const char vport_strings[][ETH_GSTRING_LEN] = {
-	/* vport statistics */
-	"rx_packets",
-	"rx_bytes",
-	"tx_packets",
-	"tx_bytes",
-	"rx_error_packets",
-	"rx_error_bytes",
-	"tx_error_packets",
-	"tx_error_bytes",
-	"rx_unicast_packets",
-	"rx_unicast_bytes",
-	"tx_unicast_packets",
-	"tx_unicast_bytes",
-	"rx_multicast_packets",
-	"rx_multicast_bytes",
-	"tx_multicast_packets",
-	"tx_multicast_bytes",
-	"rx_broadcast_packets",
-	"rx_broadcast_bytes",
-	"tx_broadcast_packets",
-	"tx_broadcast_bytes",
-	/* SW counters */
-	"tso_packets",
-	"tso_bytes",
-	"tso_inner_packets",
-	"tso_inner_bytes",
-	"lro_packets",
-	"lro_bytes",
-	"rx_csum_good",
-	"rx_csum_none",
-	"rx_csum_sw",
-	"tx_csum_offload",
-	"tx_csum_inner",
-	"tx_queue_stopped",
-	"tx_queue_wake",
-	"tx_queue_dropped",
-	"rx_wqe_err",
-	"rx_mpwqe_filler",
-	"rx_mpwqe_frag",
-	"rx_buff_alloc_err",
-};
-struct mlx5e_vport_stats {
-	/* HW counters */
-	u64 rx_packets;
-	u64 rx_bytes;
-	u64 tx_packets;
-	u64 tx_bytes;
-	u64 rx_error_packets;
-	u64 rx_error_bytes;
-	u64 tx_error_packets;
-	u64 tx_error_bytes;
-	u64 rx_unicast_packets;
-	u64 rx_unicast_bytes;
-	u64 tx_unicast_packets;
-	u64 tx_unicast_bytes;
-	u64 rx_multicast_packets;
-	u64 rx_multicast_bytes;
-	u64 tx_multicast_packets;
-	u64 tx_multicast_bytes;
-	u64 rx_broadcast_packets;
-	u64 rx_broadcast_bytes;
-	u64 tx_broadcast_packets;
-	u64 tx_broadcast_bytes;
-	/* SW counters */
-	u64 tso_packets;
-	u64 tso_bytes;
-	u64 tso_inner_packets;
-	u64 tso_inner_bytes;
-	u64 lro_packets;
-	u64 lro_bytes;
-	u64 rx_csum_good;
-	u64 rx_csum_none;
-	u64 rx_csum_sw;
-	u64 tx_csum_offload;
-	u64 tx_csum_inner;
-	u64 tx_queue_stopped;
-	u64 tx_queue_wake;
-	u64 tx_queue_dropped;
-	u64 rx_wqe_err;
-	u64 rx_mpwqe_filler;
-	u64 rx_mpwqe_frag;
-	u64 rx_buff_alloc_err;
-#define NUM_VPORT_COUNTERS 38
-};
-static const char pport_strings[][ETH_GSTRING_LEN] = {
-	/* IEEE802.3 counters */
-	"frames_tx",
-	"frames_rx",
-	"check_seq_err",
-	"alignment_err",
-	"octets_tx",
-	"octets_received",
-	"multicast_xmitted",
-	"broadcast_xmitted",
-	"multicast_rx",
-	"broadcast_rx",
-	"in_range_len_errors",
-	"out_of_range_len",
-	"too_long_errors",
-	"symbol_err",
-	"mac_control_tx",
-	"mac_control_rx",
-	"unsupported_op_rx",
-	"pause_ctrl_rx",
-	"pause_ctrl_tx",
-	/* RFC2863 counters */
-	"in_octets",
-	"in_ucast_pkts",
-	"in_discards",
-	"in_errors",
-	"in_unknown_protos",
-	"out_octets",
-	"out_ucast_pkts",
-	"out_discards",
-	"out_errors",
-	"in_multicast_pkts",
-	"in_broadcast_pkts",
-	"out_multicast_pkts",
-	"out_broadcast_pkts",
-	/* RFC2819 counters */
-	"drop_events",
-	"octets",
-	"pkts",
-	"broadcast_pkts",
-	"multicast_pkts",
-	"crc_align_errors",
-	"undersize_pkts",
-	"oversize_pkts",
-	"fragments",
-	"jabbers",
-	"collisions",
-	"p64octets",
-	"p65to127octets",
-	"p128to255octets",
-	"p256to511octets",
-	"p512to1023octets",
-	"p1024to1518octets",
-	"p1519to2047octets",
-	"p2048to4095octets",
-	"p4096to8191octets",
-	"p8192to10239octets",
-};
-#define NUM_IEEE_802_3_COUNTERS 19
-#define NUM_RFC_2863_COUNTERS 13
-#define NUM_RFC_2819_COUNTERS 21
-#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
-			    NUM_RFC_2863_COUNTERS + \
-			    NUM_RFC_2819_COUNTERS)
-struct mlx5e_pport_stats {
-	__be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
-	__be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
-	__be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
-};
-static const char qcounter_stats_strings[][ETH_GSTRING_LEN] = {
-	"rx_out_of_buffer",
-};
-struct mlx5e_qcounter_stats {
-	u32 rx_out_of_buffer;
-#define NUM_Q_COUNTERS 1
-};
-static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
-	"packets",
-	"bytes",
-	"csum_none",
-	"csum_sw",
-	"lro_packets",
-	"lro_bytes",
-	"wqe_err",
-	"mpwqe_filler",
-	"mpwqe_frag",
-	"buff_alloc_err",
-};
-struct mlx5e_rq_stats {
-	u64 packets;
-	u64 bytes;
-	u64 csum_none;
-	u64 csum_sw;
-	u64 lro_packets;
-	u64 lro_bytes;
-	u64 wqe_err;
-	u64 mpwqe_filler;
-	u64 mpwqe_frag;
-	u64 buff_alloc_err;
-#define NUM_RQ_STATS 10
-};
-static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
-	"packets",
-	"bytes",
-	"tso_packets",
-	"tso_bytes",
-	"tso_inner_packets",
-	"tso_inner_bytes",
-	"csum_offload_inner",
-	"nop",
-	"csum_offload_none",
-	"stopped",
-	"wake",
-	"dropped",
-};
-struct mlx5e_sq_stats {
-	/* commonly accessed in data path */
-	u64 packets;
-	u64 bytes;
-	u64 tso_packets;
-	u64 tso_bytes;
-	u64 tso_inner_packets;
-	u64 tso_inner_bytes;
-	u64 csum_offload_inner;
-	u64 nop;
-	/* less likely accessed in data path */
-	u64 csum_offload_none;
-	u64 stopped;
-	u64 wake;
-	u64 dropped;
-#define NUM_SQ_STATS 12
-};
-struct mlx5e_stats {
-	struct mlx5e_vport_stats vport;
-	struct mlx5e_pport_stats pport;
-	struct mlx5e_qcounter_stats qcnt;
-};
 struct mlx5e_params {
 	u8 log_sq_size;
 	u8 rq_wq_type;
@@ -404,6 +166,7 @@ struct mlx5e_params {
 	u8 rss_hfunc;
 	u8 toeplitz_hash_key[40];
 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+	bool vlan_strip_disable;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	struct ieee_ets ets;
 #endif
@@ -813,6 +576,8 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
 void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
......
@@ -174,8 +174,14 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	int i;
 	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
+		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
+	}
 	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
 }
......
@@ -543,16 +543,26 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 	if (lro) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else if (likely(is_first_ethertype_ip(skb))) {
+		return;
+	}
+	if (is_first_ethertype_ip(skb)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 		rq->stats.csum_sw++;
-	} else {
-		goto csum_none;
+		return;
 	}
+	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+		   (cqe->hds_ip_ext & CQE_L4_OK))) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		if (cqe_is_tunneled(cqe)) {
+			skb->csum_level = 1;
+			skb->encapsulation = 1;
+			rq->stats.csum_inner++;
+		}
+		return;
+	}
 		return;
 csum_none:
 	skb->ip_summed = CHECKSUM_NONE;
 	rq->stats.csum_none++;
......
[One file's diff is collapsed in this view.]
@@ -115,6 +115,19 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
+{
+	u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
+	u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
+	memset(in, 0, sizeof(in));
+	MLX5_SET(mlcr_reg, in, local_port, 1);
+	MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_MLCR, 0, 1);
+}
 int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
 			      u32 *proto_cap, int proto_mask)
 {
@@ -297,6 +310,82 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+	u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+	u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
+	int module_mapping;
+	int err;
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pmlp_reg, in, local_port, 1);
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+				   MLX5_REG_PMLP, 0, 0);
+	if (err)
+		return err;
+	module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+	*module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+	return 0;
+}
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+			     u16 offset, u16 size, u8 *data)
+{
+	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+	u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+	int module_num;
+	u16 i2c_addr;
+	int status;
+	int err;
+	void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+	err = mlx5_query_module_num(dev, &module_num);
+	if (err)
+		return err;
+	memset(in, 0, sizeof(in));
+	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+	if (offset < MLX5_EEPROM_PAGE_LENGTH &&
+	    offset + size > MLX5_EEPROM_PAGE_LENGTH)
+		/* Cross pages read, read until offset 256 in low page */
+		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+	i2c_addr = MLX5_I2C_ADDR_LOW;
+	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+		i2c_addr = MLX5_I2C_ADDR_HIGH;
+		offset -= MLX5_EEPROM_PAGE_LENGTH;
+	}
+	MLX5_SET(mcia_reg, in, l, 0);
+	MLX5_SET(mcia_reg, in, module, module_num);
+	MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+	MLX5_SET(mcia_reg, in, page_number, 0);
+	MLX5_SET(mcia_reg, in, device_address, offset);
+	MLX5_SET(mcia_reg, in, size, size);
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_MCIA, 0, 0);
+	if (err)
+		return err;
+	status = MLX5_GET(mcia_reg, out, status);
+	if (status) {
+		mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+			      status);
+		return -EIO;
+	}
+	memcpy(data, ptr, size);
+	return size;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
 static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
 				int pvlc_size, u8 local_port)
 {
@@ -607,3 +696,52 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
+				  int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pcmr_reg, in, local_port, 1);
+	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+				    outlen, MLX5_REG_PCMR, 0, 0);
+}
+static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+	return mlx5_core_access_reg(mdev, in, inlen, out,
+				    sizeof(out), MLX5_REG_PCMR, 0, 1);
+}
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
+{
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pcmr_reg, in, local_port, 1);
+	MLX5_SET(pcmr_reg, in, fcs_chk, enable);
+	return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+			 bool *enabled)
+{
+	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+	/* Default values for FW which do not support MLX5_REG_PCMR */
+	*supported = false;
+	*enabled = true;
+	if (!MLX5_CAP_GEN(mdev, ports_check))
+		return;
+	if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+		return;
+	*supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
+	*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
+}
@@ -59,6 +59,7 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
@@ -644,7 +645,8 @@ struct mlx5_err_cqe {
 };
 struct mlx5_cqe64 {
-	u8 rsvd0[2];
+	u8 outer_l3_tunneled;
+	u8 rsvd0;
 	__be16 wqe_id;
 	u8 lro_tcppsh_abort_dupack;
 	u8 lro_min_ttl;
@@ -658,7 +660,7 @@ struct mlx5_cqe64 {
 	__be16 slid;
 	__be32 flags_rqpn;
 	u8 hds_ip_ext;
-	u8 l4_hdr_type_etc;
+	u8 l4_l3_hdr_type;
 	__be16 vlan_info;
 	__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
 	__be32 imm_inval_pkey;
@@ -679,12 +681,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
 {
-	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
 }
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+	return cqe->outer_l3_tunneled & 0x1;
+}
 static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
 {
-	return !!(cqe->l4_hdr_type_etc & 0x1);
+	return !!(cqe->l4_l3_hdr_type & 0x1);
 }
 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -1368,6 +1380,7 @@ enum {
 	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
 	MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
 	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
 };
......
@@ -45,6 +45,10 @@
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
+enum {
+	MLX5_RQ_BITMASK_VSD = 1 << 1,
+};
 enum {
 	MLX5_BOARD_ID_LEN = 64,
 	MLX5_MAX_NAME_LEN = 16,
@@ -112,9 +116,12 @@ enum {
 	MLX5_REG_PMPE = 0x5010,
 	MLX5_REG_PELC = 0x500e,
 	MLX5_REG_PVLC = 0x500f,
-	MLX5_REG_PMLP = 0, /* TBD */
+	MLX5_REG_PCMR = 0x5041,
+	MLX5_REG_PMLP = 0x5002,
 	MLX5_REG_NODE_DESC = 0x6001,
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
+	MLX5_REG_MCIA = 0x9014,
+	MLX5_REG_MLCR = 0x902b,
 };
 enum {
......
@@ -35,6 +35,24 @@
 #include <linux/mlx5/driver.h>
+enum mlx5_beacon_duration {
+	MLX5_BEACON_DURATION_OFF = 0x0,
+	MLX5_BEACON_DURATION_INF = 0xffff,
+};
+enum mlx5_module_id {
+	MLX5_MODULE_ID_SFP = 0x3,
+	MLX5_MODULE_ID_QSFP = 0xC,
+	MLX5_MODULE_ID_QSFP_PLUS = 0xD,
+	MLX5_MODULE_ID_QSFP28 = 0x11,
+};
+#define MLX5_EEPROM_MAX_BYTES 32
+#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
+#define MLX5_I2C_ADDR_LOW 0x50
+#define MLX5_I2C_ADDR_HIGH 0x51
+#define MLX5_EEPROM_PAGE_LENGTH 256
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
 			 int ptys_size, int proto_mask, u8 local_port);
@@ -53,6 +71,7 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
 			       enum mlx5_port_status status);
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 				 enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
 void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -84,4 +103,10 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
 int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
 int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+			 bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+			     u16 offset, u16 size, u8 *data);
 #endif /* __MLX5_PORT_H__ */