Commit ca68d563 authored by David S. Miller

Merge tag 'mlx5-fixes-2020-07-02' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2020-07-02

This series introduces some fixes to mlx5 driver.

v1 -> v2:
 - Drop the "ip -s" patch and the mirred device hold-reference patch.
 - They will be revised in a later submission.

Please pull and let me know if there is any problem.

For -stable v5.2
 ('net/mlx5: Fix eeprom support for SFP module')

For -stable v5.4
 ('net/mlx5e: Fix 50G per lane indication')

For -stable v5.5
 ('net/mlx5e: Fix CPU mapping after function reload to avoid aRFS RX crash')
 ('net/mlx5e: Fix VXLAN configuration restore after function reload')

For -stable v5.7
 ('net/mlx5e: CT: Fix memory leak in cleanup')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 14b032b8 eb32b3f5
@@ -29,6 +29,7 @@ struct mlx5e_dcbx {
 	bool manual_buffer;
 	u32 cable_len;
 	u32 xoff;
+	u16 port_buff_cell_sz;
 };

 #define MLX5E_MAX_DSCP (64)
......
@@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
 	[MLX5E_400GAUI_8] = 400000,
 };

+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+	struct mlx5e_port_eth_proto eproto;
+	int err;
+
+	if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+		return true;
+
+	err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+	if (err)
+		return false;
+
+	return !!eproto.cap;
+}
+
 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
 				     const u32 **arr, u32 *size,
 				     bool force_legacy)
 {
-	bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);

 	*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
 		      ARRAY_SIZE(mlx5e_link_speed);
@@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 	bool ext;
 	int err;

-	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = mlx5e_ptys_ext_supported(mdev);
 	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
 	if (err)
 		goto out;
@@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 	int err;
 	int i;

-	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = mlx5e_ptys_ext_supported(mdev);
 	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
 	if (err)
 		return err;
......
@@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
 			       bool force_legacy);
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
......
@@ -34,6 +34,7 @@
 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 			    struct mlx5e_port_buffer *port_buffer)
 {
+	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
 	u32 total_used = 0;
@@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 		port_buffer->buffer[i].epsb =
 			MLX5_GET(bufferx_reg, buffer, epsb);
 		port_buffer->buffer[i].size =
-			MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+			MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
 		port_buffer->buffer[i].xon =
-			MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+			MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
 		port_buffer->buffer[i].xoff =
-			MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+			MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
 		total_used += port_buffer->buffer[i].size;

 		mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
@@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 	}

 	port_buffer->port_buffer_size =
-		MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+		MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
 	port_buffer->spare_buffer_size =
 		port_buffer->port_buffer_size - total_used;
@@ -88,9 +89,9 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 static int port_set_buffer(struct mlx5e_priv *priv,
 			   struct mlx5e_port_buffer *port_buffer)
 {
+	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
-	void *buffer;
 	void *in;
 	int err;
 	int i;
@@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv,
 		goto out;

 	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
-		buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+		void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+		u64 size = port_buffer->buffer[i].size;
+		u64 xoff = port_buffer->buffer[i].xoff;
+		u64 xon = port_buffer->buffer[i].xon;

-		MLX5_SET(bufferx_reg, buffer, size,
-			 port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
-		MLX5_SET(bufferx_reg, buffer, lossy,
-			 port_buffer->buffer[i].lossy);
-		MLX5_SET(bufferx_reg, buffer, xoff_threshold,
-			 port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
-		MLX5_SET(bufferx_reg, buffer, xon_threshold,
-			 port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+		do_div(size, port_buff_cell_sz);
+		do_div(xoff, port_buff_cell_sz);
+		do_div(xon, port_buff_cell_sz);
+		MLX5_SET(bufferx_reg, buffer, size, size);
+		MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
+		MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
+		MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
 	}

 	err = mlx5e_port_set_pbmc(mdev, in);
@@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }

 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-				 u32 xoff, unsigned int max_mtu)
+				 u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
 {
 	int i;
@@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 		}

 		if (port_buffer->buffer[i].size <
-		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+		    (xoff + max_mtu + port_buff_cell_sz)) {
 			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
 			       i, port_buffer->buffer[i].size);
 			return -ENOMEM;
@@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  * @pfc_en: <input> current pfc configuration
  * @buffer: <input> current prio to buffer mapping
  * @xoff: <input> xoff value
+ * @port_buff_cell_sz: <input> port buffer cell_size
  * @port_buffer: <output> port receive buffer configuration
  * @change: <output>
  *
@@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *	sets change to true if buffer configuration was modified.
  */
 static int update_buffer_lossy(unsigned int max_mtu,
-			       u8 pfc_en, u8 *buffer, u32 xoff,
+			       u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
 			       struct mlx5e_port_buffer *port_buffer,
 			       bool *change)
 {
@@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu,
 	}

 	if (changed) {
-		err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
@@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 				    u32 *buffer_size,
 				    u8 *prio2buffer)
 {
+	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
 	struct mlx5e_port_buffer port_buffer;
 	u32 xoff = calculate_xoff(priv, mtu);
 	bool update_prio2buffer = false;
@@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
 	}
@@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 		if (err)
 			return err;

-		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
 					  &port_buffer, &update_buffer);
 		if (err)
 			return err;
@@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 		if (err)
 			return err;

-		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
 					  xoff, &port_buffer, &update_buffer);
 		if (err)
 			return err;
@@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 			return -EINVAL;

 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
 	}
@@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 	/* Need to update buffer configuration if xoff value is changed */
 	if (!update_buffer && xoff != priv->dcbx.xoff) {
 		update_buffer = true;
-		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+		err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
 		if (err)
 			return err;
 	}
......
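
The port_buffer.c hunks above are, at heart, a unit conversion: the PBMC register counts buffer sizes and XON/XOFF thresholds in cells, and the driver used to hard-code 128-byte cells (1 << MLX5E_BUFFER_CELL_SHIFT). Below is a minimal sketch of the conversion once it is keyed to the queried cell size; the helper names are illustrative only, the driver open-codes this with MLX5_GET()/MLX5_SET() and do_div():

  #include <linux/types.h>
  #include <asm/div64.h>	/* do_div() */

  /* cells -> bytes, as in the query path above */
  static inline u32 cells_to_bytes(u32 cells, u16 cell_sz)
  {
  	return cells * cell_sz;
  }

  /* bytes -> cells, as in the set path above; do_div() because the
   * byte count is carried in a u64
   */
  static inline u32 bytes_to_cells(u64 bytes, u16 cell_sz)
  {
  	do_div(bytes, cell_sz);
  	return (u32)bytes;
  }

With the default 128-byte cell both directions are equivalent to the old shifts (x * 128 == x << 7); on hardware that reports a different cap_cell_size, the shift misstated buffer sizes, which is what these hunks correct.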
@@ -36,7 +36,6 @@
 #include "port.h"

 #define MLX5E_MAX_BUFFER 8
-#define MLX5E_BUFFER_CELL_SHIFT 7
 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */

 #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
......
@@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 	struct mlx5_ct_entry *entry = ptr;

 	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+	kfree(entry);
 }

 static void
......
@@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
 	return 0;
 }

+#define MLX5E_BUFFER_CELL_SHIFT 7
+
+static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+	u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+
+	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+		return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+	if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+				 MLX5_REG_SBCAM, 0, 0))
+		return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+	return MLX5_GET(sbcam_reg, out, cap_cell_size);
+}
+
 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 {
 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
@@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
 		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;

+	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
 	priv->dcbx.manual_buffer = false;
 	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
......
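
A brief worked note on the hunk above: the cell size is read once at DCB initialization from the SBCAM register, and if the device does not advertise sbcam_reg (or the access fails) the driver keeps the historical default of 1 << 7 = 128 bytes, so the buffer arithmetic degenerates to the old behaviour on such devices. For example, a port_buffer_size of 0x200 cells then reports as 512 * 128 = 65536 bytes, while a device with a different cap_cell_size scales accordingly.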
@@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
 				       struct ptys2ethtool_config **arr,
 				       u32 *size)
 {
-	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	bool ext = mlx5e_ptys_ext_supported(mdev);

 	*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
 	*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
@@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
 			      struct ethtool_link_ksettings *link_ksettings)
 {
 	unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
-	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	bool ext = mlx5e_ptys_ext_supported(mdev);

 	ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
@@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 			   __func__, err);
 		goto err_query_regs;
 	}
-	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
 	eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
 					   eth_proto_capability);
 	eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
@@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	autoneg = link_ksettings->base.autoneg;
 	speed = link_ksettings->base.speed;

-	ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext_supported = mlx5e_ptys_ext_supported(mdev);
 	ext = ext_requested(autoneg, adver, ext_supported);
 	if (!ext_supported && ext)
 		return -EOPNOTSUPP;
......
@@ -3104,9 +3104,6 @@ int mlx5e_open(struct net_device *netdev)
 		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
 	mutex_unlock(&priv->state_lock);

-	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-		udp_tunnel_get_rx_info(netdev);
-
 	return err;
 }
@@ -5121,6 +5118,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	if (err)
 		goto err_destroy_flow_steering;

+#ifdef CONFIG_MLX5_EN_ARFS
+	priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
+#endif
+
 	return 0;

 err_destroy_flow_steering:
@@ -5202,6 +5203,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	rtnl_lock();
 	if (netif_running(netdev))
 		mlx5e_open(netdev);
+	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+		udp_tunnel_get_rx_info(netdev);
 	netif_device_attach(netdev);
 	rtnl_unlock();
 }
@@ -5216,6 +5219,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 	rtnl_lock();
 	if (netif_running(priv->netdev))
 		mlx5e_close(priv->netdev);
+	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+		udp_tunnel_drop_rx_info(priv->netdev);
 	netif_device_detach(priv->netdev);
 	rtnl_unlock();
@@ -5288,10 +5293,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
 	/* netdev init */
 	netif_carrier_off(netdev);

-#ifdef CONFIG_MLX5_EN_ARFS
-	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
-#endif
-
 	return 0;

 err_free_cpumask:
......
@@ -4670,9 +4670,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
 					   struct mlx5e_rep_priv *rpriv)
 {
 	/* Offloaded flow rule is allowed to duplicate on non-uplink representor
-	 * sharing tc block with other slaves of a lag device.
+	 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
+	 * function is called from NIC mode.
 	 */
-	return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }

 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
@@ -4686,13 +4687,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	rcu_read_lock();
 	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
-	rcu_read_unlock();
 	if (flow) {
 		/* Same flow rule offloaded to non-uplink representor sharing tc block,
 		 * just return 0.
 		 */
 		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
-			goto out;
+			goto rcu_unlock;

 		NL_SET_ERR_MSG_MOD(extack,
 				   "flow cookie already exists, ignoring");
@@ -4700,8 +4700,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 			    "flow cookie %lx already exists, ignoring\n",
 			    f->cookie);
 		err = -EEXIST;
-		goto out;
+		goto rcu_unlock;
 	}
+rcu_unlock:
+	rcu_read_unlock();
+	if (flow)
+		goto out;

 	trace_mlx5e_configure_flower(f);
 	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
......
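
For context on the goto rcu_unlock restructuring above: the flow returned by rhashtable_lookup() is only guaranteed to stay valid inside the RCU read-side critical section, so the unlock now sits past the last dereference of flow rather than immediately after the lookup. A generic sketch of the pattern, with placeholder names standing in for the driver's structures (demo_flow is hypothetical, not struct mlx5e_tc_flow):

  #include <linux/rhashtable.h>
  #include <linux/netdevice.h>

  struct demo_flow {			/* placeholder for the real flow struct */
  	struct rhash_head node;
  	unsigned long cookie;
  	struct net_device *orig_dev;
  };

  static bool demo_flow_is_foreign_dup(struct rhashtable *ht, unsigned long *cookie,
  				     const struct rhashtable_params params,
  				     struct net_device *dev)
  {
  	struct demo_flow *flow;
  	bool dup = false;

  	rcu_read_lock();
  	flow = rhashtable_lookup(ht, cookie, params);
  	if (flow && flow->orig_dev != dev)	/* dereference while still protected */
  		dup = true;
  	rcu_read_unlock();			/* only after the last use of flow */

  	return dup;
  }

The same hunk also makes is_flow_rule_duplicate_allowed() tolerate a NULL rpriv, since in NIC mode there is no representor private data.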
@@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
 	}

 	/* Create ingress allow rule */
-	memset(spec, 0, sizeof(*spec));
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
......
@@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
 	return 0;
 }

-static int mlx5_eeprom_page(int offset)
+static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
+				u8 *module_id)
+{
+	u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
+	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+	int err, status;
+	u8 *ptr;
+
+	MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
+	MLX5_SET(mcia_reg, in, module, module_num);
+	MLX5_SET(mcia_reg, in, device_address, 0);
+	MLX5_SET(mcia_reg, in, page_number, 0);
+	MLX5_SET(mcia_reg, in, size, 1);
+	MLX5_SET(mcia_reg, in, l, 0);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_MCIA, 0, 0);
+	if (err)
+		return err;
+
+	status = MLX5_GET(mcia_reg, out, status);
+	if (status) {
+		mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+			      status);
+		return -EIO;
+	}
+
+	ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+	*module_id = ptr[0];
+
+	return 0;
+}
+
+static int mlx5_qsfp_eeprom_page(u16 offset)
 {
 	if (offset < MLX5_EEPROM_PAGE_LENGTH)
 		/* Addresses between 0-255 - page 00 */
@@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset)
 		    MLX5_EEPROM_HIGH_PAGE_LENGTH);
 }

-static int mlx5_eeprom_high_page_offset(int page_num)
+static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
 {
 	if (!page_num) /* Page 0 always start from low page */
 		return 0;
@@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num)
 	return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
 }

+static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+	*i2c_addr = MLX5_I2C_ADDR_LOW;
+	*page_num = mlx5_qsfp_eeprom_page(*offset);
+	*offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num);
+}
+
+static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+	*i2c_addr = MLX5_I2C_ADDR_LOW;
+	*page_num = 0;
+
+	if (*offset < MLX5_EEPROM_PAGE_LENGTH)
+		return;
+
+	*i2c_addr = MLX5_I2C_ADDR_HIGH;
+	*offset -= MLX5_EEPROM_PAGE_LENGTH;
+}
+
 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 			     u16 offset, u16 size, u8 *data)
 {
-	int module_num, page_num, status, err;
+	int module_num, status, err, page_num = 0;
+	u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
 	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
-	u32 in[MLX5_ST_SZ_DW(mcia_reg)];
-	u16 i2c_addr;
-	void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+	u16 i2c_addr = 0;
+	u8 module_id;
+	void *ptr;

 	err = mlx5_query_module_num(dev, &module_num);
 	if (err)
 		return err;

-	memset(in, 0, sizeof(in));
-	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
-	/* Get the page number related to the given offset */
-	page_num = mlx5_eeprom_page(offset);
+	err = mlx5_query_module_id(dev, module_num, &module_id);
+	if (err)
+		return err;

-	/* Set the right offset according to the page number,
-	 * For page_num > 0, relative offset is always >= 128 (high page).
-	 */
-	offset -= mlx5_eeprom_high_page_offset(page_num);
+	switch (module_id) {
+	case MLX5_MODULE_ID_SFP:
+		mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+		break;
+	case MLX5_MODULE_ID_QSFP:
+	case MLX5_MODULE_ID_QSFP_PLUS:
+	case MLX5_MODULE_ID_QSFP28:
+		mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+		break;
+	default:
+		mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+		return -EINVAL;
+	}

 	if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
 		/* Cross pages read, read until offset 256 in low page */
 		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;

-	i2c_addr = MLX5_I2C_ADDR_LOW;
+	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);

 	MLX5_SET(mcia_reg, in, l, 0);
 	MLX5_SET(mcia_reg, in, module, module_num);
@@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 		return -EIO;
 	}

+	ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
 	memcpy(data, ptr, size);

 	return size;
......
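
A worked example of the new parameter mapping, assuming the constants visible above (a 256-byte low page, 128-byte high pages, and the two I2C addresses selected via MLX5_I2C_ADDR_LOW/MLX5_I2C_ADDR_HIGH): a read at EEPROM offset 300 on a QSFP/QSFP+/QSFP28 module stays on the low I2C address and becomes page 1 with an in-page address of 300 - 1 * 128 = 172, whereas the same offset on an SFP module stays on page 0 but moves to the high I2C address with an in-page address of 300 - 256 = 44. Routing SFP reads through the QSFP page math was the bug this change addresses; the exact page formula for offsets above 255 lives in the part of mlx5_qsfp_eeprom_page() elided above.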
@@ -147,6 +147,7 @@ enum {
 	MLX5_REG_MCDA = 0x9063,
 	MLX5_REG_MCAM = 0x907f,
 	MLX5_REG_MIRC = 0x9162,
+	MLX5_REG_SBCAM = 0xB01F,
 	MLX5_REG_RESOURCE_DUMP = 0xC000,
 };
......
@@ -9960,6 +9960,34 @@ struct mlx5_ifc_pptb_reg_bits {
 	u8 untagged_buff[0x4];
 };

+struct mlx5_ifc_sbcam_reg_bits {
+	u8 reserved_at_0[0x8];
+	u8 feature_group[0x8];
+	u8 reserved_at_10[0x8];
+	u8 access_reg_group[0x8];
+
+	u8 reserved_at_20[0x20];
+
+	u8 sb_access_reg_cap_mask[4][0x20];
+
+	u8 reserved_at_c0[0x80];
+
+	u8 sb_feature_cap_mask[4][0x20];
+
+	u8 reserved_at_1c0[0x40];
+
+	u8 cap_total_buffer_size[0x20];
+
+	u8 cap_cell_size[0x10];
+	u8 cap_max_pg_buffers[0x8];
+	u8 cap_num_pool_supported[0x8];
+
+	u8 reserved_at_240[0x8];
+	u8 cap_sbsr_stat_size[0x8];
+	u8 cap_max_tclass_data[0x8];
+	u8 cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
 struct mlx5_ifc_pbmc_reg_bits {
 	u8 reserved_at_0[0x8];
 	u8 local_port[0x8];
......