Commit dc3a2001 authored by David S. Miller

Merge tag 'mlx5-updates-2022-05-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-05-09

1) Gavin Li adds an exit route from waiting for FW init on device boot and
   increases the FW init timeout on the health recovery flow

2) Support LAG mode on 4-port HCAs

Mark Bloch says:
================

This series adds support for 4-port HCAs to the mlx5 driver.
Starting with ConnectX-7, HCAs with 4 ports are possible.

As most driver parts aren't affected by such a configuration, most of the
driver code is unchanged.

Specifically, the only affected areas are:
- Lag
- Devcom
- Merged E-Switch
- Single FDB E-Switch

LAG was chosen to be converted first, creating a hardware LAG when all 4
ports are added to the same bond device.

Devcom, merged E-Switch and single FDB E-Switch are marked as supporting
only 2-port HCAs; future patches will add support for 4-port HCAs.

In order to activate the hardware LAG, a user can execute the following:

ip link add bond0 type bond
ip link set bond0 type bond miimon 100 mode 2
ip link set eth2 master bond0
ip link set eth3 master bond0
ip link set eth4 master bond0
ip link set eth5 master bond0

Where eth2, eth3, eth4 and eth5 are the PFs of the same HCA.
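
Once the bond is up, the new "lag" debugfs directory added by this series
can be used to inspect the resulting hardware LAG. A minimal sketch,
assuming debugfs is mounted at /sys/kernel/debug and using a hypothetical
PCI address for one of the bonded PFs (adjust to your setup):

cat /sys/kernel/debug/mlx5/0000:08:00.0/lag/state    # "active" or "disabled"
cat /sys/kernel/debug/mlx5/0000:08:00.0/lag/type     # roce / switchdev / multipath
cat /sys/kernel/debug/mlx5/0000:08:00.0/lag/members  # PCI names of the participating PFs
cat /sys/kernel/debug/mlx5/0000:08:00.0/lag/mapping  # virtual to physical port mapping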

================

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53a332f2 7f46a0b7
......@@ -100,7 +100,7 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
port_type) == MLX5_CAP_PORT_TYPE_IB)
num_qps = pd->device->attrs.max_pkeys;
else if (dev->lag_active)
num_qps = MLX5_MAX_PORTS;
num_qps = dev->lag_ports;
}
gsi = &mqp->gsi;
......
......@@ -2991,6 +2991,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
}
dev->flow_db->lag_demux_ft = ft;
dev->lag_ports = mlx5_lag_get_num_ports(mdev);
dev->lag_active = true;
return 0;
......
......@@ -1131,6 +1131,7 @@ struct mlx5_ib_dev {
struct xarray sig_mrs;
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
u16 pkey_table_len;
u8 lag_ports;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
......
......@@ -3907,7 +3907,7 @@ static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
return (unsigned int)atomic_add_return(1, tx_port_affinity) %
MLX5_MAX_PORTS + 1;
(dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1;
}
static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
......
......@@ -14,7 +14,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
fw_reset.o qos.o lib/tout.o
......
......@@ -555,12 +555,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
PCI_SLOT(dev->pdev->devfn));
}
static int next_phys_dev(struct device *dev, const void *data)
static int _next_phys_dev(struct mlx5_core_dev *mdev,
const struct mlx5_core_dev *curr)
{
struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
struct mlx5_core_dev *mdev = madev->mdev;
const struct mlx5_core_dev *curr = data;
if (!mlx5_core_is_pf(mdev))
return 0;
......@@ -574,8 +571,30 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1;
}
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
static int next_phys_dev(struct device *dev, const void *data)
{
struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
struct mlx5_core_dev *mdev = madev->mdev;
return _next_phys_dev(mdev, data);
}
static int next_phys_dev_lag(struct device *dev, const void *data)
{
struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
struct mlx5_core_dev *mdev = madev->mdev;
if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
!MLX5_CAP_GEN(mdev, lag_master) ||
(MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
return 0;
return _next_phys_dev(mdev, data);
}
static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
int (*match)(struct device *dev, const void *data))
{
struct auxiliary_device *adev;
struct mlx5_adev *madev;
......@@ -583,7 +602,7 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return NULL;
adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
adev = auxiliary_find_device(NULL, dev, match);
if (!adev)
return NULL;
......@@ -592,6 +611,20 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
return madev->mdev;
}
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
lockdep_assert_held(&mlx5_intf_mutex);
return mlx5_get_next_dev(dev, &next_phys_dev);
}
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
lockdep_assert_held(&mlx5_intf_mutex);
return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}
void mlx5_dev_list_lock(void)
{
mutex_lock(&mlx5_intf_mutex);
......
......@@ -178,13 +178,13 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
return mlx5_load_one(dev);
return mlx5_load_one(dev, false);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
return mlx5_load_one(dev);
return mlx5_load_one(dev, false);
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
......
......@@ -1569,9 +1569,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
ida_init(&esw->offloads.vport_metadata_ida);
xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
mutex_init(&esw->state_lock);
lockdep_register_key(&esw->mode_lock_key);
init_rwsem(&esw->mode_lock);
lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
......@@ -1615,7 +1613,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
lockdep_unregister_key(&esw->mode_lock_key);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
xa_destroy(&esw->offloads.vhca_map);
......@@ -1893,17 +1890,6 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
(dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
return true;
return false;
}
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1)
{
......@@ -2014,17 +2000,6 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw)
up_write(&esw->mode_lock);
}
/**
* mlx5_esw_lock() - Take write lock on esw mode lock
* @esw: eswitch device.
*/
void mlx5_esw_lock(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
}
/**
* mlx5_eswitch_get_total_vports - Get total vports of the eswitch
*
......
......@@ -331,7 +331,6 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
struct lock_class_key mode_lock_key;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
......@@ -518,8 +517,6 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
......@@ -706,7 +703,6 @@ void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void mlx5_esw_lock(struct mlx5_eswitch *esw);
void esw_vport_change_handle_locked(struct mlx5_vport *vport);
......@@ -724,7 +720,6 @@ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
......@@ -733,9 +728,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(-EOPNOTSUPP);
}
static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }
static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
......
......@@ -148,7 +148,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
mlx5_load_one(dev);
mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "lag.h"
static char *get_str_mode_type(struct mlx5_lag *ldev)
{
if (ldev->flags & MLX5_LAG_FLAG_ROCE)
return "roce";
if (ldev->flags & MLX5_LAG_FLAG_SRIOV)
return "switchdev";
if (ldev->flags & MLX5_LAG_FLAG_MULTIPATH)
return "multipath";
return NULL;
}
static int type_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
struct mlx5_lag *ldev;
char *mode = NULL;
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = get_str_mode_type(ldev);
mutex_unlock(&ldev->lock);
if (!mode)
return -EINVAL;
seq_printf(file, "%s\n", mode);
return 0;
}
static int port_sel_mode_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
struct mlx5_lag *ldev;
int ret = 0;
char *mode;
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = get_str_port_sel_mode(ldev->flags);
else
ret = -EINVAL;
mutex_unlock(&ldev->lock);
if (ret || !mode)
return ret;
seq_printf(file, "%s\n", mode);
return 0;
}
static int state_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
struct mlx5_lag *ldev;
bool active;
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
active = __mlx5_lag_is_active(ldev);
mutex_unlock(&ldev->lock);
seq_printf(file, "%s\n", active ? "active" : "disabled");
return 0;
}
static int flags_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
struct mlx5_lag *ldev;
bool shared_fdb;
bool lag_active;
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active)
shared_fdb = ldev->shared_fdb;
mutex_unlock(&ldev->lock);
if (!lag_active)
return -EINVAL;
seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off");
return 0;
}
static int mapping_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
u8 ports[MLX5_MAX_PORTS] = {};
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
int num_ports;
int i;
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) {
mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
&num_ports);
hash = true;
} else {
for (i = 0; i < ldev->ports; i++)
ports[i] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
mutex_unlock(&ldev->lock);
if (!lag_active)
return -EINVAL;
for (i = 0; i < num_ports; i++) {
if (hash)
seq_printf(file, "%d\n", ports[i] + 1);
else
seq_printf(file, "%d:%d\n", i + 1, ports[i]);
}
return 0;
}
static int members_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
struct mlx5_lag *ldev;
int i;
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev)
continue;
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
}
mutex_unlock(&ldev->lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(type);
DEFINE_SHOW_ATTRIBUTE(port_sel_mode);
DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(flags);
DEFINE_SHOW_ATTRIBUTE(mapping);
DEFINE_SHOW_ATTRIBUTE(members);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev)
{
struct dentry *dbg;
dbg = debugfs_create_dir("lag", mlx5_debugfs_get_dev_root(dev));
dev->priv.dbg.lag_debugfs = dbg;
debugfs_create_file("type", 0444, dbg, dev, &type_fops);
debugfs_create_file("port_sel_mode", 0444, dbg, dev, &port_sel_mode_fops);
debugfs_create_file("state", 0444, dbg, dev, &state_fops);
debugfs_create_file("flags", 0444, dbg, dev, &flags_fops);
debugfs_create_file("mapping", 0444, dbg, dev, &mapping_fops);
debugfs_create_file("members", 0444, dbg, dev, &members_fops);
}
void mlx5_ldev_remove_debugfs(struct dentry *dbg)
{
debugfs_remove_recursive(dbg);
}
......@@ -4,6 +4,9 @@
#ifndef __MLX5_LAG_H__
#define __MLX5_LAG_H__
#include <linux/debugfs.h>
#define MLX5_LAG_MAX_HASH_BUCKETS 16
#include "mlx5_core.h"
#include "mp.h"
#include "port_sel.h"
......@@ -45,9 +48,11 @@ struct lag_tracker {
*/
struct mlx5_lag {
u8 flags;
u8 ports;
u8 buckets;
int mode_changes_in_progress;
bool shared_fdb;
u8 v2p_map[MLX5_MAX_PORTS];
u8 v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
struct kref ref;
struct lag_func pf[MLX5_MAX_PORTS];
struct lag_tracker tracker;
......@@ -56,6 +61,8 @@ struct mlx5_lag {
struct notifier_block nb;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
/* Protect lag fields/state changes */
struct mutex lock;
};
static inline struct mlx5_lag *
......@@ -85,4 +92,11 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *get_str_port_sel_mode(u8 flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
void mlx5_ldev_remove_debugfs(struct dentry *dbg);
#endif /* __MLX5_LAG_H__ */
......@@ -12,7 +12,8 @@ enum {
static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_definer *definer)
struct mlx5_flow_definer *definer,
u8 rules)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
......@@ -25,7 +26,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
MLX5_SET(create_flow_group_in, in, match_definer_id,
mlx5_get_match_definer_id(definer));
MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
MLX5_SET(create_flow_group_in, in, end_flow_index, rules - 1);
MLX5_SET(create_flow_group_in, in, group_type,
MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
......@@ -36,7 +37,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 port1, u8 port2)
u8 *ports)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_flow_table_attr ft_attr = {};
......@@ -44,8 +45,10 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
int err, i;
int idx;
int j;
ft_attr.max_fte = MLX5_MAX_PORTS;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
......@@ -61,7 +64,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
}
lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
lag_definer->definer);
lag_definer->definer,
ft_attr.max_fte);
if (IS_ERR(lag_definer->fg)) {
err = PTR_ERR(lag_definer->fg);
goto destroy_ft;
......@@ -70,19 +74,25 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
u8 affinity = i == 0 ? port1 : port2;
dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
vhca_id);
lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
NULL, &flow_act,
&dest, 1);
if (IS_ERR(lag_definer->rules[i])) {
err = PTR_ERR(lag_definer->rules[i]);
while (i--)
mlx5_del_flow_rules(lag_definer->rules[i]);
goto destroy_fg;
for (i = 0; i < ldev->ports; i++) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
idx = i * ldev->buckets + j;
affinity = ports[idx];
dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
vhca_id);
lag_definer->rules[idx] = mlx5_add_flow_rules(lag_definer->ft,
NULL, &flow_act,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
while (i--)
while (j--)
mlx5_del_flow_rules(lag_definer->rules[idx]);
goto destroy_fg;
}
}
}
......@@ -279,8 +289,7 @@ static int mlx5_lag_set_definer(u32 *match_definer_mask,
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 port1,
u8 port2)
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_definer *lag_definer;
......@@ -308,7 +317,7 @@ mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
goto free_mask;
}
err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
err = mlx5_lag_create_port_sel_table(ldev, lag_definer, ports);
if (err)
goto destroy_match_definer;
......@@ -329,10 +338,16 @@ static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
int idx;
int i;
int j;
for (i = 0; i < MLX5_MAX_PORTS; i++)
mlx5_del_flow_rules(lag_definer->rules[i]);
for (i = 0; i < ldev->ports; i++) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
}
mlx5_destroy_flow_group(lag_definer->fg);
mlx5_destroy_flow_table(lag_definer->ft);
mlx5_destroy_match_definer(dev, lag_definer->definer);
......@@ -356,7 +371,7 @@ static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type,
u8 port1, u8 port2)
u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_lag_definer *lag_definer;
......@@ -364,7 +379,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
false, port1, port2);
false, ports);
if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer);
goto destroy_definers;
......@@ -376,7 +391,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
lag_definer =
mlx5_lag_create_definer(ldev, hash_type, tt,
true, port1, port2);
true, ports);
if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer);
goto destroy_definers;
......@@ -513,13 +528,13 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
}
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, u8 port1, u8 port2)
enum netdev_lag_hash hash_type, u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err;
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
return err;
......@@ -543,52 +558,62 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
return err;
}
static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
struct mlx5_lag_definer **definers,
u8 port1, u8 port2)
static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
struct mlx5_lag_definer *def,
u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_destination dest = {};
int idx;
int err;
int tt;
int i;
int j;
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
struct mlx5_flow_handle **rules = definers[tt]->rules;
for (i = 0; i < ldev->ports; i++) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[i] == ports[i])
continue;
if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
dest.vport.vhca_id =
MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
&dest, NULL);
dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
vhca_id);
err = mlx5_modify_rule_destination(def->rules[idx], &dest, NULL);
if (err)
return err;
}
}
if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
dest.vport.vhca_id =
MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
&dest, NULL);
if (err)
return err;
}
return 0;
}
static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
struct mlx5_lag_definer **definers,
u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err;
int tt;
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
err = __mlx5_lag_modify_definers_destinations(ldev, definers[tt], ports);
if (err)
return err;
}
return 0;
}
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err;
err = mlx5_lag_modify_definers_destinations(ldev,
port_sel->outer.definers,
port1, port2);
ports);
if (err)
return err;
......@@ -597,7 +622,7 @@ int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
return mlx5_lag_modify_definers_destinations(ldev,
port_sel->inner.definers,
port1, port2);
ports);
}
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
......
......@@ -10,7 +10,10 @@ struct mlx5_lag_definer {
struct mlx5_flow_definer *definer;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
/* Each port has ldev->buckets number of rules and they are arranged in
* [port * buckets .. port * buckets + buckets) locations
*/
struct mlx5_flow_handle *rules[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
};
struct mlx5_lag_ttc {
......@@ -27,22 +30,20 @@ struct mlx5_lag_port_sel {
#ifdef CONFIG_MLX5_ESWITCH
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2);
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports);
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, u8 port1,
u8 port2);
enum netdev_lag_hash hash_type, u8 *ports);
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type,
u8 port1, u8 port2)
u8 *ports)
{
return 0;
}
static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1,
u8 port2)
static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
return 0;
}
......
......@@ -14,7 +14,7 @@ static LIST_HEAD(devcom_list);
struct mlx5_devcom_component {
struct {
void *data;
} device[MLX5_MAX_PORTS];
} device[MLX5_DEVCOM_PORTS_SUPPORTED];
mlx5_devcom_event_handler_t handler;
struct rw_semaphore sem;
......@@ -25,7 +25,7 @@ struct mlx5_devcom_list {
struct list_head list;
struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED];
};
struct mlx5_devcom {
......@@ -74,13 +74,15 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return NULL;
if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
return NULL;
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
struct mlx5_core_dev *tmp_dev = NULL;
idx = -1;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
if (iter->devs[i])
tmp_dev = iter->devs[i];
else
......@@ -134,11 +136,11 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
kfree(devcom);
for (i = 0; i < MLX5_MAX_PORTS; i++)
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (priv->devs[i])
break;
if (i != MLX5_MAX_PORTS)
if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
return;
list_del(&priv->list);
......@@ -191,7 +193,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
for (i = 0; i < MLX5_MAX_PORTS; i++)
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (i != devcom->idx && comp->device[i].data) {
err = comp->handler(event, comp->device[i].data,
event_data);
......@@ -239,7 +241,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
return NULL;
}
for (i = 0; i < MLX5_MAX_PORTS; i++)
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
if (i != devcom->idx)
break;
......
......@@ -6,6 +6,8 @@
#include <linux/mlx5/driver.h>
#define MLX5_DEVCOM_PORTS_SUPPORTED 2
enum mlx5_devcom_components {
MLX5_DEVCOM_ESW_OFFLOADS,
......
......@@ -10,6 +10,7 @@ struct mlx5_timeouts {
static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
[MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000,
[MLX5_TO_FW_PRE_INIT_ON_RECOVERY_TIMEOUT_MS] = 7200000,
[MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000,
[MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2,
[MLX5_TO_FW_INIT_MS] = 2000,
......
......@@ -7,6 +7,7 @@
enum mlx5_timeouts_types {
/* pre init timeouts (not read from FW) */
MLX5_TO_FW_PRE_INIT_TIMEOUT_MS,
MLX5_TO_FW_PRE_INIT_ON_RECOVERY_TIMEOUT_MS,
MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS,
MLX5_TO_FW_PRE_INIT_WAIT_MS,
......
......@@ -189,7 +189,8 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
fw_initializing = ioread32be(&dev->iseg->initializing);
if (!(fw_initializing >> 31))
break;
if (time_after(jiffies, end)) {
if (time_after(jiffies, end) ||
test_and_clear_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
err = -EBUSY;
break;
}
......@@ -1002,7 +1003,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_devcom_unregister_device(dev->priv.devcom);
}
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
{
int err;
......@@ -1017,11 +1018,11 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT),
err = wait_fw_init(dev, timeout,
mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
timeout);
return err;
}
......@@ -1271,7 +1272,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_setup(dev, true);
err = mlx5_function_setup(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err)
goto err_function;
......@@ -1335,9 +1336,10 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mutex_unlock(&dev->intf_state_mutex);
}
int mlx5_load_one(struct mlx5_core_dev *dev)
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
{
int err = 0;
u64 timeout;
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
......@@ -1347,7 +1349,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev)
/* remove any previous indication of internal error */
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_setup(dev, false);
if (recovery)
timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT);
else
timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT);
err = mlx5_function_setup(dev, timeout);
if (err)
goto err_function;
......@@ -1602,6 +1608,7 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
mlx5_crdump_disable(dev);
......@@ -1717,7 +1724,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
mlx5_pci_trace(dev, "Enter, loading driver..\n");
err = mlx5_load_one(dev);
err = mlx5_load_one(dev, false);
mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
!err ? "recovered" : "Failed");
......@@ -1785,6 +1792,7 @@ static void shutdown(struct pci_dev *pdev)
int err;
mlx5_core_info(dev, "Shutdown was called\n");
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev);
......@@ -1804,7 +1812,7 @@ static int mlx5_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
return mlx5_load_one(dev);
return mlx5_load_one(dev, false);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
......@@ -1849,7 +1857,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev)
return -EIO;
}
return mlx5_load_one(dev);
return mlx5_load_one(dev, true);
}
static struct pci_driver mlx5_core_driver = {
......
......@@ -210,6 +210,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
......@@ -290,7 +291,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
......
......@@ -84,7 +84,7 @@ enum mlx5_sqp_t {
};
enum {
MLX5_MAX_PORTS = 2,
MLX5_MAX_PORTS = 4,
};
enum {
......@@ -558,6 +558,7 @@ struct mlx5_debugfs_entries {
struct dentry *cq_debugfs;
struct dentry *cmdif_debugfs;
struct dentry *pages_debugfs;
struct dentry *lag_debugfs;
};
struct mlx5_ft_pool;
......@@ -632,6 +633,7 @@ enum mlx5_device_state {
enum mlx5_interface_state {
MLX5_INTERFACE_STATE_UP = BIT(0),
MLX5_BREAK_FW_WAIT = BIT(1),
};
enum mlx5_pci_status {
......@@ -1141,6 +1143,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
int num_counters,
size_t *offsets);
struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
......