Commit e5935e62 authored by David S. Miller's avatar David S. Miller

Merge branch 'mellanox-next'

Amir Vadai says:

====================
Mellanox ethernet driver update Oct-27-2014

This patchset introduces some small bug fixes, support for getting/setting
VLAN offload, and getting/setting link speed and capabilities.

The first 7 patches, by Saeed, add support for setting/getting link speed and
getting cable capabilities.
Next 2 patches also by Saeed, enable the user to turn rx/tx vlan offloading on
and off.
Jenni fixed a bug in the error flow during device initialization.
Ido and Jack fixed some code duplication and errors discovered by static checker.
The last patch, by me, is a fix to make ethtool report the actual number of
rings used by the indirection QP.

Patches were applied and tested against commit 61ed53de ("Merge tag 'ntb-3.18'
of git://github.com/jonmason/ntb")
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 941d8ebc d5ec899a
...@@ -1338,6 +1338,15 @@ static struct mlx4_cmd_info cmd_info[] = { ...@@ -1338,6 +1338,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL, .verify = NULL,
.wrapper = mlx4_QUERY_IF_STAT_wrapper .wrapper = mlx4_QUERY_IF_STAT_wrapper
}, },
{
.opcode = MLX4_CMD_ACCESS_REG,
.has_inbox = true,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL,
},
/* Native multicast commands are not available for guests */ /* Native multicast commands are not available for guests */
{ {
.opcode = MLX4_CMD_QP_ATTACH, .opcode = MLX4_CMD_QP_ATTACH,
......
...@@ -35,52 +35,6 @@ ...@@ -35,52 +35,6 @@
#include "mlx4_en.h" #include "mlx4_en.h"
/* mlx4_en_timestamp_config() - apply a new HW time-stamping configuration.
 * @dev:       net device whose time-stamping setup is being changed
 * @tx_type:   requested HWTSTAMP_TX_* mode
 * @rx_filter: requested HWTSTAMP_FILTER_* mode
 *
 * Changing the time-stamp mode requires rebuilding all RX/TX resources,
 * so the port is stopped (if up), resources are freed and reallocated,
 * then the port is restarted.  Returns 0 on success or a negative errno.
 */
int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	/* Fast path: requested configuration is already active. */
	if (priv->hwtstamp_config.tx_type == tx_type &&
	    priv->hwtstamp_config.rx_filter == rx_filter)
		return 0;

	mutex_lock(&mdev->state_lock);
	/* Remember whether the port was running so we can restart it. */
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	en_warn(priv, "Changing Time Stamp configuration\n");

	priv->hwtstamp_config.tx_type = tx_type;
	priv->hwtstamp_config.rx_filter = rx_filter;

	/* HW RX time-stamping and RX VLAN stripping are mutually
	 * exclusive on this device: drop the CTAG_RX offload while any
	 * RX time-stamp filter is enabled, restore it otherwise.
	 */
	if (rx_filter != HWTSTAMP_FILTER_NONE)
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	else
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	/* dev->features may have changed above; notify the stack. */
	netdev_features_change(dev);
	return err;
}
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/ */
static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/mlx4/driver.h> #include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/in.h> #include <linux/in.h>
#include <net/ip.h> #include <net/ip.h>
...@@ -374,7 +375,302 @@ static void mlx4_en_get_strings(struct net_device *dev, ...@@ -374,7 +375,302 @@ static void mlx4_en_get_strings(struct net_device *dev,
} }
} }
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static u32 mlx4_en_autoneg_get(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
u32 autoneg = AUTONEG_DISABLE;
if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
(priv->port_state.flags & MLX4_EN_PORT_ANE))
autoneg = AUTONEG_ENABLE;
return autoneg;
}
/* Map the PTYS capability bitmap to an ethtool SUPPORTED_* port type.
 * Twisted-pair modes win over fibre, fibre over backplane; 0 if none match.
 */
static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
{
	const u32 tp_modes = MLX4_PROT_MASK(MLX4_10GBASE_T) |
			     MLX4_PROT_MASK(MLX4_1000BASE_T) |
			     MLX4_PROT_MASK(MLX4_100BASE_TX);
	const u32 fibre_modes = MLX4_PROT_MASK(MLX4_10GBASE_CR) |
				MLX4_PROT_MASK(MLX4_10GBASE_SR) |
				MLX4_PROT_MASK(MLX4_56GBASE_SR4) |
				MLX4_PROT_MASK(MLX4_40GBASE_CR4) |
				MLX4_PROT_MASK(MLX4_40GBASE_SR4) |
				MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII);
	const u32 backplane_modes = MLX4_PROT_MASK(MLX4_56GBASE_KR4) |
				    MLX4_PROT_MASK(MLX4_40GBASE_KR4) |
				    MLX4_PROT_MASK(MLX4_20GBASE_KR2) |
				    MLX4_PROT_MASK(MLX4_10GBASE_KR) |
				    MLX4_PROT_MASK(MLX4_10GBASE_KX4) |
				    MLX4_PROT_MASK(MLX4_1000BASE_KX);
	u32 proto_cap = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (proto_cap & tp_modes)
		return SUPPORTED_TP;
	if (proto_cap & fibre_modes)
		return SUPPORTED_FIBRE;
	if (proto_cap & backplane_modes)
		return SUPPORTED_Backplane;
	return 0;
}
/* Map the currently-operating (or, if the link is down, the capable)
 * PTYS protocol bitmap to an ethtool PORT_* connector type.
 */
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
	const u32 tp_modes = MLX4_PROT_MASK(MLX4_10GBASE_T) |
			     MLX4_PROT_MASK(MLX4_1000BASE_T) |
			     MLX4_PROT_MASK(MLX4_100BASE_TX);
	const u32 fibre_modes = MLX4_PROT_MASK(MLX4_10GBASE_SR) |
				MLX4_PROT_MASK(MLX4_56GBASE_SR4) |
				MLX4_PROT_MASK(MLX4_40GBASE_SR4) |
				MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII);
	const u32 da_modes = MLX4_PROT_MASK(MLX4_10GBASE_CR) |
			     MLX4_PROT_MASK(MLX4_56GBASE_CR4) |
			     MLX4_PROT_MASK(MLX4_40GBASE_CR4);
	const u32 backplane_modes = MLX4_PROT_MASK(MLX4_56GBASE_KR4) |
				    MLX4_PROT_MASK(MLX4_40GBASE_KR4) |
				    MLX4_PROT_MASK(MLX4_20GBASE_KR2) |
				    MLX4_PROT_MASK(MLX4_10GBASE_KR) |
				    MLX4_PROT_MASK(MLX4_10GBASE_KX4) |
				    MLX4_PROT_MASK(MLX4_1000BASE_KX);
	u32 proto = be32_to_cpu(ptys_reg->eth_proto_oper);

	if (!proto) /* link down: fall back to the capability mask */
		proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (proto & tp_modes)
		return PORT_TP;
	if (proto & fibre_modes)
		return PORT_FIBRE;
	if (proto & da_modes)
		return PORT_DA;
	if (proto & backplane_modes)
		return PORT_NONE;
	return PORT_OTHER;
}
/* Number of protocol bits carried by the PTYS eth_proto_* fields. */
#define MLX4_LINK_MODES_SZ \
	(FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)

/* Column selector for ptys2ethtool_map below. */
enum ethtool_report {
	SUPPORTED = 0,
	ADVERTISED = 1,
	SPEED = 2
};

/* Translates mlx4 link mode to equivalent ethtool Link modes/speed.
 * Indexed by enum mlx4_link_mode; holes in that enum are zero entries.
 * NOTE(review): ethtool of this era has no dedicated 10G CR/SR bits, so
 * MLX4_10GBASE_CR/SR are reported as the KR bit — confirm against
 * include/uapi/linux/ethtool.h before extending.
 */
static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
	[MLX4_100BASE_TX] = {
		SUPPORTED_100baseT_Full,
		ADVERTISED_100baseT_Full,
		SPEED_100
	},

	[MLX4_1000BASE_T] = {
		SUPPORTED_1000baseT_Full,
		ADVERTISED_1000baseT_Full,
		SPEED_1000
	},
	[MLX4_1000BASE_CX_SGMII] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
	},
	[MLX4_1000BASE_KX] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
	},

	[MLX4_10GBASE_T] = {
		SUPPORTED_10000baseT_Full,
		ADVERTISED_10000baseT_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_CX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_KX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_KR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_CR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},
	[MLX4_10GBASE_SR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
	},

	[MLX4_20GBASE_KR2] = {
		SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
		ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
		SPEED_20000
	},

	[MLX4_40GBASE_CR4] = {
		SUPPORTED_40000baseCR4_Full,
		ADVERTISED_40000baseCR4_Full,
		SPEED_40000
	},
	[MLX4_40GBASE_KR4] = {
		SUPPORTED_40000baseKR4_Full,
		ADVERTISED_40000baseKR4_Full,
		SPEED_40000
	},
	[MLX4_40GBASE_SR4] = {
		SUPPORTED_40000baseSR4_Full,
		ADVERTISED_40000baseSR4_Full,
		SPEED_40000
	},

	[MLX4_56GBASE_KR4] = {
		SUPPORTED_56000baseKR4_Full,
		ADVERTISED_56000baseKR4_Full,
		SPEED_56000
	},
	[MLX4_56GBASE_CR4] = {
		SUPPORTED_56000baseCR4_Full,
		ADVERTISED_56000baseCR4_Full,
		SPEED_56000
	},
	[MLX4_56GBASE_SR4] = {
		SUPPORTED_56000baseSR4_Full,
		ADVERTISED_56000baseSR4_Full,
		SPEED_56000
	},
};
static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
{
int i;
u32 link_modes = 0;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
if (eth_proto & MLX4_PROT_MASK(i))
link_modes |= ptys2ethtool_map[i][report];
}
return link_modes;
}
/* Inverse of ptys2ethtool_link_modes(): collect every PTYS protocol bit
 * whose ethtool representation intersects @link_modes.
 */
static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
{
	u32 proto_bits = 0;
	int bit;

	for (bit = MLX4_LINK_MODES_SZ - 1; bit >= 0; bit--) {
		if (link_modes & ptys2ethtool_map[bit][report])
			proto_bits |= 1 << bit;
	}

	return proto_bits;
}
/* Convert actual speed (SPEED_XXX) to ptys link modes */
static u32 speed2ptys_link_modes(u32 speed)
{
	u32 proto_bits = 0;
	int bit;

	for (bit = 0; bit < MLX4_LINK_MODES_SZ; bit++)
		if (speed == ptys2ethtool_map[bit][SPEED])
			proto_bits |= 1 << bit;

	return proto_bits;
}
/* Fill @cmd from the port's PTYS register: supported/advertised link
 * modes, connector type, pause and autoneg state.  Returns 0 on success
 * or the (positive/negative) status of the PTYS access command.
 */
static int ethtool_get_ptys_settings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	u32 eth_proto;
	int ret;

	/* Query the PTYS register for this port's Ethernet protocols. */
	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
			ret);
		return ret;
	}
	en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
	       ptys_reg.proto_mask);
	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_cap));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_admin));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_oper));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));

	cmd->supported = 0;
	cmd->advertising = 0;

	/* Supported = port type + every link mode the HW is capable of. */
	cmd->supported |= ptys_get_supported_port(&ptys_reg);

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
	cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);

	/* Advertised = the admin-enabled subset of the capabilities. */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
	cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	/* Pause advertisement is derived from the configured profile. */
	cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;

	cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
		ADVERTISED_Asym_Pause : 0;

	cmd->port = ptys_get_active_port(&ptys_reg);
	cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	/* Report autoneg only when backplane AN is supported/active. */
	if (mlx4_en_autoneg_get(dev)) {
		cmd->supported |= SUPPORTED_Autoneg;
		cmd->advertising |= ADVERTISED_Autoneg;
	}

	cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* Link-partner advertisement (valid once autoneg completed, ANC). */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
	cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
			ADVERTISED_Autoneg : 0;

	/* No PHY/MDIO access through this path. */
	cmd->phy_address = 0;
	cmd->mdio_support = 0;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;

	return ret; /* ret == 0 here */
}
static void ethtool_get_default_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
int trans_type; int trans_type;
...@@ -382,18 +678,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ...@@ -382,18 +678,7 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->autoneg = AUTONEG_DISABLE; cmd->autoneg = AUTONEG_DISABLE;
cmd->supported = SUPPORTED_10000baseT_Full; cmd->supported = SUPPORTED_10000baseT_Full;
cmd->advertising = ADVERTISED_10000baseT_Full; cmd->advertising = ADVERTISED_10000baseT_Full;
trans_type = priv->port_state.transceiver;
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
trans_type = priv->port_state.transciver;
if (netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
cmd->duplex = DUPLEX_FULL;
} else {
ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
cmd->duplex = DUPLEX_UNKNOWN;
}
if (trans_type > 0 && trans_type <= 0xC) { if (trans_type > 0 && trans_type <= 0xC) {
cmd->port = PORT_FIBRE; cmd->port = PORT_FIBRE;
...@@ -409,17 +694,118 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ...@@ -409,17 +694,118 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->port = -1; cmd->port = -1;
cmd->transceiver = -1; cmd->transceiver = -1;
} }
}
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int ret = -EINVAL;
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
priv->port_state.flags & MLX4_EN_PORT_ANC,
priv->port_state.flags & MLX4_EN_PORT_ANE);
if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
ret = ethtool_get_ptys_settings(dev, cmd);
if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
ethtool_get_default_settings(dev, cmd);
if (netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
cmd->duplex = DUPLEX_FULL;
} else {
ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
cmd->duplex = DUPLEX_UNKNOWN;
}
return 0; return 0;
} }
/* Calculate the PTYS eth_proto_admin value for a requested ethtool speed
 * (SPEED_XXX).  A speed of 0 resets advertising to the full capability.
 */
static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
				   __be32 proto_cap)
{
	u32 requested_modes;

	if (!speed) { /* Speed = 0 ==> Reset Link modes */
		en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
			be32_to_cpu(proto_cap));
		return proto_cap;
	}

	requested_modes = speed2ptys_link_modes(speed);
	en_info(priv, "Setting Speed to %d\n", speed);
	/* Restrict the requested modes to what the HW actually supports. */
	return cpu_to_be32(requested_modes) & proto_cap;
}
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{ {
if ((cmd->autoneg == AUTONEG_ENABLE) || struct mlx4_en_priv *priv = netdev_priv(dev);
(ethtool_cmd_speed(cmd) != SPEED_10000) || struct mlx4_ptys_reg ptys_reg;
(cmd->duplex != DUPLEX_FULL)) __be32 proto_admin;
int ret;
u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
int speed = ethtool_cmd_speed(cmd);
en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
speed, cmd->advertising, cmd->autoneg, cmd->duplex);
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
(cmd->autoneg == AUTONEG_ENABLE) || (cmd->duplex == DUPLEX_HALF))
return -EINVAL; return -EINVAL;
/* Nothing to change */ memset(&ptys_reg, 0, sizeof(ptys_reg));
ptys_reg.local_port = priv->port;
ptys_reg.proto_mask = MLX4_PTYS_EN;
ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
MLX4_ACCESS_REG_QUERY, &ptys_reg);
if (ret) {
en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
ret);
return 0;
}
proto_admin = cpu_to_be32(ptys_adv);
if (speed >= 0 && speed != priv->port_state.link_speed)
/* If speed was set then speed decides :-) */
proto_admin = speed_set_ptys_admin(priv, speed,
ptys_reg.eth_proto_cap);
proto_admin &= ptys_reg.eth_proto_cap;
if (proto_admin == ptys_reg.eth_proto_admin)
return 0; /* Nothing to change */
if (!proto_admin) {
en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
return -EINVAL; /* nothing to change due to bad input */
}
en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
be32_to_cpu(proto_admin));
ptys_reg.eth_proto_admin = proto_admin;
ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
&ptys_reg);
if (ret) {
en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
be32_to_cpu(ptys_reg.eth_proto_admin), ret);
return ret;
}
en_warn(priv, "Port link mode changed, restarting port...\n");
mutex_lock(&priv->mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&priv->mdev->state_lock);
return 0; return 0;
} }
...@@ -596,6 +982,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key) ...@@ -596,6 +982,7 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
int err = 0; int err = 0;
rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
rss_rings = 1 << ilog2(rss_rings);
while (n--) { while (n--) {
ring_index[n] = rss_map->qps[n % rss_rings].qpn - ring_index[n] = rss_map->qps[n % rss_rings].qpn -
...@@ -1309,6 +1696,86 @@ static int mlx4_en_set_tunable(struct net_device *dev, ...@@ -1309,6 +1696,86 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret; return ret;
} }
/* Report the plugged module's EEPROM layout (type and length) to ethtool,
 * based on the SFF module identifier and revision bytes.
 */
static int mlx4_en_get_module_info(struct net_device *dev,
				   struct ethtool_modinfo *modinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u8 hdr[4];
	int read;

	/* Byte 0 = module identifier, byte 1 = revision id. */
	read = mlx4_get_module_info(mdev->dev, priv->port, 0, 2, hdr);
	if (read < 2)
		return -EIO;

	switch (hdr[0]) {
	case MLX4_MODULE_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLX4_MODULE_ID_QSFP_PLUS:
		/* Revision >= 3 follows SFF-8636 rather than SFF-8436. */
		if (hdr[1] >= 0x3) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLX4_MODULE_ID_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	case MLX4_MODULE_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		return -ENOSYS;
	}

	return 0;
}
/* Read the module (cable) EEPROM contents requested by ethtool.
 * The firmware may return fewer bytes than asked for, so keep issuing
 * reads until @ee->len bytes were copied or the firmware signals
 * completion/error.
 */
static int mlx4_en_get_module_eeprom(struct net_device *dev,
				     struct ethtool_eeprom *ee,
				     u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	if (ee->len == 0)
		return -EINVAL;

	/* Pre-zero so a short read still hands back defined bytes. */
	memset(data, 0, ee->len);

	while (i < ee->len) {
		en_dbg(DRV, priv,
		       "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		       i, offset, ee->len - i);

		/* Returns bytes read, 0 when done, negative on failure. */
		ret = mlx4_get_module_info(mdev->dev, priv->port,
					   offset, ee->len - i, data + i);

		if (!ret) /* Done reading */
			return 0;

		if (ret < 0) {
			en_err(priv,
			       "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			       i, offset, ee->len - i, ret);
			/* NOTE(review): firmware errors are deliberately not
			 * propagated — the (partially zeroed) buffer is
			 * returned as success.  Confirm this is intended.
			 */
			return 0;
		}

		i += ret;
		offset += ret;
	}
	return 0;
}
const struct ethtool_ops mlx4_en_ethtool_ops = { const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo, .get_drvinfo = mlx4_en_get_drvinfo,
...@@ -1341,6 +1808,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { ...@@ -1341,6 +1808,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_priv_flags = mlx4_en_get_priv_flags, .get_priv_flags = mlx4_en_get_priv_flags,
.get_tunable = mlx4_en_get_tunable, .get_tunable = mlx4_en_get_tunable,
.set_tunable = mlx4_en_set_tunable, .set_tunable = mlx4_en_set_tunable,
.get_module_info = mlx4_en_get_module_info,
.get_module_eeprom = mlx4_en_get_module_eeprom
}; };
......
...@@ -221,15 +221,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev) ...@@ -221,15 +221,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
{ {
struct mlx4_en_dev *mdev; struct mlx4_en_dev *mdev;
int i; int i;
int err;
printk_once(KERN_INFO "%s", mlx4_en_version); printk_once(KERN_INFO "%s", mlx4_en_version);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) { if (!mdev)
err = -ENOMEM;
goto err_free_res; goto err_free_res;
}
if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
goto err_free_dev; goto err_free_dev;
...@@ -264,8 +261,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) ...@@ -264,8 +261,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
} }
/* Build device profile according to supplied module parameters */ /* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev); if (mlx4_en_get_profile(mdev)) {
if (err) {
mlx4_err(mdev, "Bad module parameters, aborting\n"); mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr; goto err_mr;
} }
...@@ -286,10 +282,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) ...@@ -286,10 +282,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
* Note: we cannot use the shared workqueue because of deadlocks caused * Note: we cannot use the shared workqueue because of deadlocks caused
* by the rtnl lock */ * by the rtnl lock */
mdev->workqueue = create_singlethread_workqueue("mlx4_en"); mdev->workqueue = create_singlethread_workqueue("mlx4_en");
if (!mdev->workqueue) { if (!mdev->workqueue)
err = -ENOMEM;
goto err_mr; goto err_mr;
}
/* At this stage all non-port specific tasks are complete: /* At this stage all non-port specific tasks are complete:
* mark the card state as up */ * mark the card state as up */
......
...@@ -575,7 +575,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) ...@@ -575,7 +575,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
struct mlx4_mac_entry *entry; struct mlx4_mac_entry *entry;
int index = 0; int index = 0;
int err = 0; int err = 0;
u64 reg_id; u64 reg_id = 0;
int *qpn = &priv->base_qpn; int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
...@@ -1843,8 +1843,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) ...@@ -1843,8 +1843,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
} }
local_bh_enable(); local_bh_enable();
while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) napi_synchronize(&cq->napi);
msleep(1);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq); mlx4_en_deactivate_cq(priv, cq);
...@@ -2157,7 +2156,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) ...@@ -2157,7 +2156,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE; return -ERANGE;
} }
if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) { if (mlx4_en_reset_config(dev, config, dev->features)) {
config.tx_type = HWTSTAMP_TX_OFF; config.tx_type = HWTSTAMP_TX_OFF;
config.rx_filter = HWTSTAMP_FILTER_NONE; config.rx_filter = HWTSTAMP_FILTER_NONE;
} }
...@@ -2190,6 +2189,16 @@ static int mlx4_en_set_features(struct net_device *netdev, ...@@ -2190,6 +2189,16 @@ static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features) netdev_features_t features)
{ {
struct mlx4_en_priv *priv = netdev_priv(netdev); struct mlx4_en_priv *priv = netdev_priv(netdev);
int ret = 0;
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
en_info(priv, "Turn %s RX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
features);
if (ret)
return ret;
}
if (features & NETIF_F_LOOPBACK) if (features & NETIF_F_LOOPBACK)
priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
...@@ -2431,6 +2440,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -2431,6 +2440,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = netdev_priv(dev); priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv)); memset(priv, 0, sizeof(struct mlx4_en_priv));
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_VXLAN
INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#endif
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
#endif
priv->dev = dev; priv->dev = dev;
priv->mdev = mdev; priv->mdev = mdev;
priv->ddev = &mdev->pdev->dev; priv->ddev = &mdev->pdev->dev;
...@@ -2462,16 +2486,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -2462,16 +2486,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->cqe_size = mdev->dev->caps.cqe_size; priv->cqe_size = mdev->dev->caps.cqe_size;
priv->mac_index = -1; priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL; priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_VXLAN
INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#endif
#ifdef CONFIG_MLX4_EN_DCB #ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) { if (!mlx4_is_slave(priv->mdev->dev)) {
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
...@@ -2514,11 +2528,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -2514,11 +2528,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (err) if (err)
goto out; goto out;
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
#endif
/* Initialize time stamping config */ /* Initialize time stamping config */
priv->hwtstamp_config.flags = 0; priv->hwtstamp_config.flags = 0;
priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
...@@ -2559,7 +2568,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -2559,7 +2568,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA | dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER; NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK; dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (mdev->dev->caps.steering_mode == if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) MLX4_STEERING_MODE_DEVICE_MANAGED)
...@@ -2633,3 +2643,79 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -2633,3 +2643,79 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
return err; return err;
} }
/* mlx4_en_reset_config() - apply a new HW time-stamp config and/or RX
 * VLAN-stripping state, rebuilding port resources as needed.
 * @dev:       net device to reconfigure
 * @ts_config: requested hwtstamp configuration
 * @features:  requested netdev feature set (only CTAG_RX is examined)
 *
 * RX time-stamping and RX VLAN stripping are mutually exclusive on this
 * HW, so the function both rejects conflicting requests and forcibly
 * drops CTAG_RX when a time-stamp filter is active.
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
		return 0; /* Nothing to change */

	/* Refuse to enable RX VLAN stripping while RX time-stamping is on. */
	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

	mutex_lock(&mdev->state_lock);
	/* Remember whether the port was running so we can restart it. */
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
		ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	priv->hwtstamp_config.tx_type = ts_config.tx_type;
	priv->hwtstamp_config.rx_filter = ts_config.rx_filter;

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		/* Caller explicitly toggled CTAG_RX: honor the request. */
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	/* RX vlan offload and RX time-stamping can't co-exist !
	 * Regardless of the caller's choice,
	 * Turn Off RX vlan offload in case of time-stamping is ON
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	/* dev->features may have changed above; notify the stack. */
	netdev_features_change(dev);
	return err;
}
...@@ -91,21 +91,37 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) ...@@ -91,21 +91,37 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
* already synchronized, no need in locking */ * already synchronized, no need in locking */
state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) { switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
case MLX4_EN_100M_SPEED:
state->link_speed = SPEED_100;
break;
case MLX4_EN_1G_SPEED: case MLX4_EN_1G_SPEED:
state->link_speed = 1000; state->link_speed = SPEED_1000;
break; break;
case MLX4_EN_10G_SPEED_XAUI: case MLX4_EN_10G_SPEED_XAUI:
case MLX4_EN_10G_SPEED_XFI: case MLX4_EN_10G_SPEED_XFI:
state->link_speed = 10000; state->link_speed = SPEED_10000;
break;
case MLX4_EN_20G_SPEED:
state->link_speed = SPEED_20000;
break; break;
case MLX4_EN_40G_SPEED: case MLX4_EN_40G_SPEED:
state->link_speed = 40000; state->link_speed = SPEED_40000;
break;
case MLX4_EN_56G_SPEED:
state->link_speed = SPEED_56000;
break; break;
default: default:
state->link_speed = -1; state->link_speed = -1;
break; break;
} }
state->transciver = qport_context->transceiver;
state->transceiver = qport_context->transceiver;
state->flags = 0; /* Reset and recalculate the port flags */
state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
MLX4_EN_PORT_ANC : 0;
state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
MLX4_EN_PORT_ANE : 0;
out: out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox); mlx4_free_cmd_mailbox(mdev->dev, mailbox);
......
...@@ -53,22 +53,49 @@ enum { ...@@ -53,22 +53,49 @@ enum {
MLX4_MCAST_ENABLE = 2, MLX4_MCAST_ENABLE = 2,
}; };
/* PTYS register link-mode bit positions (eth_proto_cap/admin/oper fields).
 * Gaps in the numbering correspond to modes this driver does not handle.
 */
enum mlx4_link_mode {
	MLX4_1000BASE_CX_SGMII	 = 0,
	MLX4_1000BASE_KX	 = 1,
	MLX4_10GBASE_CX4	 = 2,
	MLX4_10GBASE_KX4	 = 3,
	MLX4_10GBASE_KR		 = 4,
	MLX4_20GBASE_KR2	 = 5,
	MLX4_40GBASE_CR4	 = 6,
	MLX4_40GBASE_KR4	 = 7,
	MLX4_56GBASE_KR4	 = 8,
	MLX4_10GBASE_CR		 = 12,
	MLX4_10GBASE_SR		 = 13,
	MLX4_40GBASE_SR4	 = 15,
	MLX4_56GBASE_CR4	 = 17,
	MLX4_56GBASE_SR4	 = 18,
	MLX4_100BASE_TX		 = 24,
	MLX4_1000BASE_T		 = 25,
	MLX4_10GBASE_T		 = 26,
};

/* Single-bit mask for one link mode.  The argument is parenthesized so
 * that expressions such as MLX4_PROT_MASK(x & 3) expand correctly
 * (the original `1<<link_mode` form broke under operators with lower
 * precedence than `<<`).
 */
#define MLX4_PROT_MASK(link_mode) (1 << (link_mode))
enum { enum {
MLX4_EN_1G_SPEED = 0x02, MLX4_EN_100M_SPEED = 0x04,
MLX4_EN_10G_SPEED_XFI = 0x01,
MLX4_EN_10G_SPEED_XAUI = 0x00, MLX4_EN_10G_SPEED_XAUI = 0x00,
MLX4_EN_10G_SPEED_XFI = 0x01,
MLX4_EN_1G_SPEED = 0x02,
MLX4_EN_20G_SPEED = 0x08,
MLX4_EN_40G_SPEED = 0x40, MLX4_EN_40G_SPEED = 0x40,
MLX4_EN_56G_SPEED = 0x20,
MLX4_EN_OTHER_SPEED = 0x0f, MLX4_EN_OTHER_SPEED = 0x0f,
}; };
struct mlx4_en_query_port_context { struct mlx4_en_query_port_context {
u8 link_up; u8 link_up;
#define MLX4_EN_LINK_UP_MASK 0x80 #define MLX4_EN_LINK_UP_MASK 0x80
u8 reserved; #define MLX4_EN_ANC_MASK 0x40
u8 autoneg;
#define MLX4_EN_AUTONEG_MASK 0x80
__be16 mtu; __be16 mtu;
u8 reserved2; u8 reserved2;
u8 link_speed; u8 link_speed;
#define MLX4_EN_SPEED_MASK 0x43 #define MLX4_EN_SPEED_MASK 0x6f
u16 reserved3[5]; u16 reserved3[5];
__be64 mac; __be64 mac;
u8 transceiver; u8 transceiver;
......
...@@ -119,7 +119,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, ...@@ -119,7 +119,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
out: out:
while (i--) { while (i--) {
frag_info = &priv->frag_info[i];
if (page_alloc[i].page != ring_alloc[i].page) { if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma, dma_unmap_page(priv->ddev, page_alloc[i].dma,
page_alloc[i].page_size, PCI_DMA_FROMDEVICE); page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
......
...@@ -129,11 +129,15 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv) ...@@ -129,11 +129,15 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM; return -ENOMEM;
/* The device supports 1G, 10G and 40G speeds */ /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */
if (priv->port_state.link_speed != 1000 && if (priv->port_state.link_speed != SPEED_100 &&
priv->port_state.link_speed != 10000 && priv->port_state.link_speed != SPEED_1000 &&
priv->port_state.link_speed != 40000) priv->port_state.link_speed != SPEED_10000 &&
priv->port_state.link_speed != SPEED_20000 &&
priv->port_state.link_speed != SPEED_40000 &&
priv->port_state.link_speed != SPEED_56000)
return priv->port_state.link_speed; return priv->port_state.link_speed;
return 0; return 0;
} }
......
...@@ -139,7 +139,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) ...@@ -139,7 +139,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[10] = "TCP/IP offloads/flow-steering for VXLAN support", [10] = "TCP/IP offloads/flow-steering for VXLAN support",
[11] = "MAD DEMUX (Secure-Host) support", [11] = "MAD DEMUX (Secure-Host) support",
[12] = "Large cache line (>64B) CQE stride support", [12] = "Large cache line (>64B) CQE stride support",
[13] = "Large cache line (>64B) EQE stride support" [13] = "Large cache line (>64B) EQE stride support",
[14] = "Ethernet protocol control support",
[15] = "Ethernet Backplane autoneg support"
}; };
int i; int i;
...@@ -560,6 +562,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -560,6 +562,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
...@@ -573,6 +576,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -573,6 +576,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e #define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
...@@ -737,15 +741,19 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) ...@@ -737,15 +741,19 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
dev_cap->max_rq_desc_sz = size; dev_cap->max_rq_desc_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
if (field & (1 << 5))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
if (field & (1 << 6)) if (field & (1 << 6))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
if (field & (1 << 7)) if (field & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
MLX4_GET(dev_cap->bmme_flags, outbox, MLX4_GET(dev_cap->bmme_flags, outbox,
QUERY_DEV_CAP_BMME_FLAGS_OFFSET); QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
MLX4_GET(dev_cap->reserved_lkey, outbox, MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET); QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6) if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
...@@ -2144,3 +2152,114 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev) ...@@ -2144,3 +2152,114 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev)
mlx4_free_cmd_mailbox(dev, mailbox); mlx4_free_cmd_mailbox(dev, mailbox);
return err; return err;
} }
/* Access Reg commands */
enum mlx4_access_reg_masks {
MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
MLX4_ACCESS_REG_LEN_MASK = 0x7ff
};
struct mlx4_access_reg {
__be16 constant1;
u8 status;
u8 resrvd1;
__be16 reg_id;
u8 method;
u8 constant2;
__be32 resrvd2[2];
__be16 len_const;
__be16 resrvd3;
#define MLX4_ACCESS_REG_HEADER_SIZE (20)
u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
} __attribute__((__packed__));
/**
* mlx4_ACCESS_REG - Generic access reg command.
* @dev: mlx4_dev.
* @reg_id: register ID to access.
* @method: Access method Read/Write.
* @reg_len: register length to Read/Write in bytes.
* @reg_data: reg_data pointer to Read/Write From/To.
*
* Access ConnectX registers FW command.
* Returns 0 on success and copies outbox mlx4_access_reg data
* field into reg_data or a negative error code.
*/
static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
enum mlx4_access_reg_method method,
u16 reg_len, void *reg_data)
{
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_access_reg *inbuf, *outbuf;
int err;
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
mlx4_free_cmd_mailbox(dev, inbox);
return PTR_ERR(outbox);
}
inbuf = inbox->buf;
outbuf = outbox->buf;
inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
inbuf->constant2 = 0x1;
inbuf->reg_id = cpu_to_be16(reg_id);
inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
inbuf->len_const =
cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
((0x3) << 12));
memcpy(inbuf->reg_data, reg_data, reg_len);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err)
goto out;
if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
mlx4_err(dev,
"MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
reg_id, err);
goto out;
}
memcpy(reg_data, outbuf->reg_data, reg_len);
out:
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_free_cmd_mailbox(dev, outbox);
return err;
}
/* ConnectX registers IDs */
enum mlx4_reg_id {
MLX4_REG_ID_PTYS = 0x5004,
};
/**
* mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
* register
* @dev: mlx4_dev.
* @method: Access method Read/Write.
* @ptys_reg: PTYS register data pointer.
*
* Access ConnectX PTYS register, to Read/Write Port Type/Speed
* configuration
* Returns 0 on success or a negative error code.
*/
int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
enum mlx4_access_reg_method method,
struct mlx4_ptys_reg *ptys_reg)
{
return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
method, sizeof(*ptys_reg), ptys_reg);
}
EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
...@@ -421,10 +421,16 @@ struct mlx4_en_rss_map { ...@@ -421,10 +421,16 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state; enum mlx4_qp_state indir_state;
}; };
enum mlx4_en_port_flag {
MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */
MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */
};
struct mlx4_en_port_state { struct mlx4_en_port_state {
int link_state; int link_state;
int link_speed; int link_speed;
int transciver; int transceiver;
u32 flags;
}; };
struct mlx4_en_pkt_stats { struct mlx4_en_pkt_stats {
...@@ -829,6 +835,13 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv); ...@@ -829,6 +835,13 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
((dev->features & feature) ^ (new_features & feature))
int mlx4_en_reset_config(struct net_device *dev,
struct hwtstamp_config ts_config,
netdev_features_t new_features);
/* /*
* Functions for time stamping * Functions for time stamping
*/ */
...@@ -838,9 +851,6 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, ...@@ -838,9 +851,6 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
u64 timestamp); u64 timestamp);
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
int mlx4_en_timestamp_config(struct net_device *dev,
int tx_type,
int rx_filter);
/* Globals /* Globals
*/ */
......
...@@ -1311,3 +1311,159 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, ...@@ -1311,3 +1311,159 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
return 0; return 0;
} }
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
/* Module Info Data */
struct mlx4_cable_info {
u8 i2c_addr;
u8 page_num;
__be16 dev_mem_address;
__be16 reserved1;
__be16 size;
__be32 reserved2[2];
u8 data[MODULE_INFO_MAX_READ];
};
enum cable_info_err {
CABLE_INF_INV_PORT = 0x1,
CABLE_INF_OP_NOSUP = 0x2,
CABLE_INF_NOT_CONN = 0x3,
CABLE_INF_NO_EEPRM = 0x4,
CABLE_INF_PAGE_ERR = 0x5,
CABLE_INF_INV_ADDR = 0x6,
CABLE_INF_I2C_ADDR = 0x7,
CABLE_INF_QSFP_VIO = 0x8,
CABLE_INF_I2C_BUSY = 0x9,
};
#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
static inline const char *cable_info_mad_err_str(u16 mad_status)
{
u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
switch (err) {
case CABLE_INF_INV_PORT:
return "invalid port selected";
case CABLE_INF_OP_NOSUP:
return "operation not supported for this port (the port is of type CX4 or internal)";
case CABLE_INF_NOT_CONN:
return "cable is not connected";
case CABLE_INF_NO_EEPRM:
return "the connected cable has no EPROM (passive copper cable)";
case CABLE_INF_PAGE_ERR:
return "page number is greater than 15";
case CABLE_INF_INV_ADDR:
return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
case CABLE_INF_I2C_ADDR:
return "invalid I2C slave address";
case CABLE_INF_QSFP_VIO:
return "at least one cable violates the QSFP specification and ignores the modsel signal";
case CABLE_INF_I2C_BUSY:
return "I2C bus is constantly busy";
}
return "Unknown Error";
}
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
* @port: port number.
* @offset: byte offset in eeprom to start reading data from.
* @size: num of bytes to read.
* @data: output buffer to put the requested data into.
*
* Reads cable module eeprom data, puts the outcome data into
* data pointer paramer.
* Returns num of read bytes on success or a negative error
* code.
*/
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
u16 offset, u16 size, u8 *data)
{
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
u16 i2c_addr;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
mlx4_free_cmd_mailbox(dev, inbox);
return PTR_ERR(outbox);
}
inmad = (struct mlx4_mad_ifc *)(inbox->buf);
outmad = (struct mlx4_mad_ifc *)(outbox->buf);
inmad->method = 0x1; /* Get */
inmad->class_version = 0x1;
inmad->mgmt_class = 0x1;
inmad->base_version = 0x1;
inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
/* Cross pages reads are not allowed
* read until offset 256 in low page
*/
size -= offset + size - I2C_PAGE_SIZE;
i2c_addr = I2C_ADDR_LOW;
if (offset >= I2C_PAGE_SIZE) {
/* Reset offset to high page */
i2c_addr = I2C_ADDR_HIGH;
offset -= I2C_PAGE_SIZE;
}
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
cable_info->page_num = 0;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (ret)
goto out;
if (be16_to_cpu(outmad->status)) {
/* Mad returned with bad status */
ret = be16_to_cpu(outmad->status);
mlx4_warn(dev,
"MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
0xFF60, port, i2c_addr, offset, size,
ret, cable_info_mad_err_str(ret));
if (i2c_addr == I2C_ADDR_HIGH &&
MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
/* Some SFP cables do not support i2c slave
* address 0x51 (high page), abort silently.
*/
ret = 0;
else
ret = -ret;
goto out;
}
cable_info = (struct mlx4_cable_info *)outmad->data;
memcpy(data, cable_info->data, size);
ret = size;
out:
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_free_cmd_mailbox(dev, outbox);
return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
...@@ -67,6 +67,8 @@ enum { ...@@ -67,6 +67,8 @@ enum {
MLX4_CMD_MAP_ICM_AUX = 0xffc, MLX4_CMD_MAP_ICM_AUX = 0xffc,
MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
MLX4_CMD_SET_ICM_SIZE = 0xffd, MLX4_CMD_SET_ICM_SIZE = 0xffd,
MLX4_CMD_ACCESS_REG = 0x3b,
/*master notify fw on finish for slave's flr*/ /*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b, MLX4_CMD_INFORM_FLR_DONE = 0x5b,
MLX4_CMD_GET_OP_REQ = 0x59, MLX4_CMD_GET_OP_REQ = 0x59,
......
...@@ -186,7 +186,9 @@ enum { ...@@ -186,7 +186,9 @@ enum {
MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13,
MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14,
MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15
}; };
enum { enum {
...@@ -379,6 +381,13 @@ enum { ...@@ -379,6 +381,13 @@ enum {
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
enum mlx4_module_id {
MLX4_MODULE_ID_SFP = 0x3,
MLX4_MODULE_ID_QSFP = 0xC,
MLX4_MODULE_ID_QSFP_PLUS = 0xD,
MLX4_MODULE_ID_QSFP28 = 0x11,
};
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{ {
return (major << 32) | (minor << 16) | subminor; return (major << 32) | (minor << 16) | subminor;
...@@ -799,6 +808,26 @@ struct mlx4_init_port_param { ...@@ -799,6 +808,26 @@ struct mlx4_init_port_param {
u64 si_guid; u64 si_guid;
}; };
#define MAD_IFC_DATA_SZ 192
/* MAD IFC Mailbox */
struct mlx4_mad_ifc {
u8 base_version;
u8 mgmt_class;
u8 class_version;
u8 method;
__be16 status;
__be16 class_specific;
__be64 tid;
__be16 attr_id;
__be16 resv;
__be32 attr_mod;
__be64 mkey;
__be16 dr_slid;
__be16 dr_dlid;
u8 reserved[28];
u8 data[MAD_IFC_DATA_SZ];
} __packed;
#define mlx4_foreach_port(port, dev, type) \ #define mlx4_foreach_port(port, dev, type) \
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
if ((type) == (dev)->caps.port_mask[(port)]) if ((type) == (dev)->caps.port_mask[(port)])
...@@ -1283,10 +1312,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, ...@@ -1283,10 +1312,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
u64 iova, u64 size, int npages, u64 iova, u64 size, int npages,
int page_shift, struct mlx4_mpt_entry *mpt_entry); int page_shift, struct mlx4_mpt_entry *mpt_entry);
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
u16 offset, u16 size, u8 *data);
/* Returns true if running in low memory profile (kdump kernel) */ /* Returns true if running in low memory profile (kdump kernel) */
static inline bool mlx4_low_memory_profile(void) static inline bool mlx4_low_memory_profile(void)
{ {
return is_kdump_kernel(); return is_kdump_kernel();
} }
/* ACCESS REG commands */
enum mlx4_access_reg_method {
MLX4_ACCESS_REG_QUERY = 0x1,
MLX4_ACCESS_REG_WRITE = 0x2,
};
/* ACCESS PTYS Reg command */
enum mlx4_ptys_proto {
MLX4_PTYS_IB = 1<<0,
MLX4_PTYS_EN = 1<<2,
};
struct mlx4_ptys_reg {
u8 resrvd1;
u8 local_port;
u8 resrvd2;
u8 proto_mask;
__be32 resrvd3[2];
__be32 eth_proto_cap;
__be16 ib_width_cap;
__be16 ib_speed_cap;
__be32 resrvd4;
__be32 eth_proto_admin;
__be16 ib_width_admin;
__be16 ib_speed_admin;
__be32 resrvd5;
__be32 eth_proto_oper;
__be16 ib_width_oper;
__be16 ib_speed_oper;
__be32 resrvd6;
__be32 eth_proto_lp_adv;
} __packed;
int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
enum mlx4_access_reg_method method,
struct mlx4_ptys_reg *ptys_reg);
#endif /* MLX4_DEVICE_H */ #endif /* MLX4_DEVICE_H */
...@@ -1213,6 +1213,10 @@ enum ethtool_sfeatures_retval_bits { ...@@ -1213,6 +1213,10 @@ enum ethtool_sfeatures_retval_bits {
#define SUPPORTED_40000baseCR4_Full (1 << 24) #define SUPPORTED_40000baseCR4_Full (1 << 24)
#define SUPPORTED_40000baseSR4_Full (1 << 25) #define SUPPORTED_40000baseSR4_Full (1 << 25)
#define SUPPORTED_40000baseLR4_Full (1 << 26) #define SUPPORTED_40000baseLR4_Full (1 << 26)
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#define SUPPORTED_56000baseLR4_Full (1 << 30)
#define ADVERTISED_10baseT_Half (1 << 0) #define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1) #define ADVERTISED_10baseT_Full (1 << 1)
...@@ -1241,6 +1245,10 @@ enum ethtool_sfeatures_retval_bits { ...@@ -1241,6 +1245,10 @@ enum ethtool_sfeatures_retval_bits {
#define ADVERTISED_40000baseCR4_Full (1 << 24) #define ADVERTISED_40000baseCR4_Full (1 << 24)
#define ADVERTISED_40000baseSR4_Full (1 << 25) #define ADVERTISED_40000baseSR4_Full (1 << 25)
#define ADVERTISED_40000baseLR4_Full (1 << 26) #define ADVERTISED_40000baseLR4_Full (1 << 26)
#define ADVERTISED_56000baseKR4_Full (1 << 27)
#define ADVERTISED_56000baseCR4_Full (1 << 28)
#define ADVERTISED_56000baseSR4_Full (1 << 29)
#define ADVERTISED_56000baseLR4_Full (1 << 30)
/* The following are all involved in forcing a particular link /* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the * mode for the device for setting things. When getting the
...@@ -1248,12 +1256,16 @@ enum ethtool_sfeatures_retval_bits { ...@@ -1248,12 +1256,16 @@ enum ethtool_sfeatures_retval_bits {
* it was forced up into this mode or autonegotiated. * it was forced up into this mode or autonegotiated.
*/ */
/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ /* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
#define SPEED_10 10 #define SPEED_10 10
#define SPEED_100 100 #define SPEED_100 100
#define SPEED_1000 1000 #define SPEED_1000 1000
#define SPEED_2500 2500 #define SPEED_2500 2500
#define SPEED_10000 10000 #define SPEED_10000 10000
#define SPEED_20000 20000
#define SPEED_40000 40000
#define SPEED_56000 56000
#define SPEED_UNKNOWN -1 #define SPEED_UNKNOWN -1
/* Duplex, half or full. */ /* Duplex, half or full. */
...@@ -1343,6 +1355,10 @@ enum ethtool_sfeatures_retval_bits { ...@@ -1343,6 +1355,10 @@ enum ethtool_sfeatures_retval_bits {
#define ETH_MODULE_SFF_8079_LEN 256 #define ETH_MODULE_SFF_8079_LEN 256
#define ETH_MODULE_SFF_8472 0x2 #define ETH_MODULE_SFF_8472 0x2
#define ETH_MODULE_SFF_8472_LEN 512 #define ETH_MODULE_SFF_8472_LEN 512
#define ETH_MODULE_SFF_8636 0x3
#define ETH_MODULE_SFF_8636_LEN 256
#define ETH_MODULE_SFF_8436 0x4
#define ETH_MODULE_SFF_8436_LEN 256
/* Reset flags */ /* Reset flags */
/* The reset() operation must clear the flags for the components which /* The reset() operation must clear the flags for the components which
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment