Commit 14340219 authored by David S. Miller

Merge tag 'mlx5-updates-2020-03-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-03-25

1) Cleanups from Dan Carpenter and wenxu.

2) Paul and Roi: some minor updates and fixes to E-Switch, addressing
issues introduced in the previous reg_c0 update series.

3) Eli Cohen simplifies and improves flow steering matching group searches
and flow table entry version management (see the control-flow sketch after
the flow steering hunks below).

4) Parav Pandit improves the thread safety of devlink eswitch mode changes,
by making devlink rely on the driver for locking and introducing mlx5
eswitch mode change protection (a condensed sketch of the pattern follows
the commit header below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9d6a36c7 8e0aa4bc
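
Note on (4): each eswitch mode-change entry point becomes a thin wrapper that
takes the new esw->mode_lock and delegates to a *_locked variant, while the
devlink eswitch callbacks take the same lock themselves (the ESWITCH_GET op is
marked DEVLINK_NL_FLAG_NO_LOCK in the net/core/devlink.c hunk). A minimal
sketch of the wrapper shape, condensed from the eswitch hunks below:

    int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
    {
            int ret;

            if (!ESW_ALLOWED(esw))
                    return 0;

            /* Serialize against sriov state changes and devlink commands. */
            mutex_lock(&esw->mode_lock);
            ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
            mutex_unlock(&esw->mode_lock);
            return ret;
    }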
@@ -193,7 +193,7 @@ bool mlx5_device_registered(struct mlx5_core_dev *dev)
 	return found;
 }
 
-int mlx5_register_device(struct mlx5_core_dev *dev)
+void mlx5_register_device(struct mlx5_core_dev *dev)
 {
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_interface *intf;
@@ -203,8 +203,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
 	list_for_each_entry(intf, &intf_list, list)
 		mlx5_add_device(intf, priv);
 	mutex_unlock(&mlx5_intf_mutex);
-
-	return 0;
 }
 
 void mlx5_unregister_device(struct mlx5_core_dev *dev)
...
@@ -90,7 +90,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 
-	return mlx5_unload_one(dev, false);
+	mlx5_unload_one(dev, false);
+	return 0;
 }
 
 static int mlx5_devlink_reload_up(struct devlink *devlink,
...
@@ -1246,8 +1246,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
 	case TC_SETUP_CLSFLOWER:
 		memcpy(&tmp, f, sizeof(*f));
 
-		if (!mlx5_esw_chains_prios_supported(esw) ||
-		    tmp.common.chain_index)
+		if (!mlx5_esw_chains_prios_supported(esw))
 			return -EOPNOTSUPP;
 
 		/* Re-use tc offload path by moving the ft flow to the
...
@@ -3058,7 +3058,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 			 */
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Can't offload mirroring with action ct");
-			return -EOPNOTSUPP;
+			return false;
 		}
 	} else {
 		actions = flow->nic_attr->action;
...
@@ -2067,12 +2067,54 @@ static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
 	}
 }
 
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+static void
+mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
+{
+	const u32 *out;
+
+	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
+
+	if (num_vfs < 0)
+		return;
+
+	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+		esw->esw_funcs.num_vfs = num_vfs;
+		return;
+	}
+
+	out = mlx5_esw_query_functions(esw->dev);
+	if (IS_ERR(out))
+		return;
+
+	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
+					  host_params_context.host_num_of_vfs);
+	kvfree(out);
+}
+
+/**
+ * mlx5_eswitch_enable_locked - Enable eswitch
+ * @esw:     Pointer to eswitch
+ * @mode:    Eswitch mode to enable
+ * @num_vfs: Enable eswitch for given number of VFs. This is optional.
+ *	     Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
+ *	     Caller should pass num_vfs > 0 when enabling eswitch for
+ *	     vf vports. Caller should pass num_vfs = 0, when eswitch
+ *	     is enabled without sriov VFs or when caller
+ *	     is unaware of the sriov state of the host PF on ECPF based
+ *	     eswitch. Caller should pass < 0 when num_vfs should be
+ *	     completely ignored. This is typically the case when eswitch
+ *	     is enabled without sriov regardless of PF/ECPF system.
+ *
+ * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
+ * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
+ * eswitch vports. It returns 0 on success or an error code on failure.
+ */
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
 {
 	int err;
 
-	if (!ESW_ALLOWED(esw) ||
-	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+	lockdep_assert_held(&esw->mode_lock);
+
+	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
 		return -EOPNOTSUPP;
 	}
@@ -2085,6 +2127,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 	mlx5_eswitch_get_devlink_param(esw);
 
+	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+
 	esw_create_tsar(esw);
 
 	esw->mode = mode;
@@ -2121,11 +2165,34 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 	return err;
 }
 
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+/**
+ * mlx5_eswitch_enable - Enable eswitch
+ * @esw:     Pointer to eswitch
+ * @num_vfs: Enable eswitch for given number of VFs.
+ *	     Caller must pass num_vfs > 0 when enabling eswitch for
+ *	     vf vports.
+ *
+ * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
+ */
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
+{
+	int ret;
+
+	if (!ESW_ALLOWED(esw))
+		return 0;
+
+	mutex_lock(&esw->mode_lock);
+	ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+	mutex_unlock(&esw->mode_lock);
+	return ret;
+}
+
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
 {
 	int old_mode;
 
-	if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
+	lockdep_assert_held_write(&esw->mode_lock);
+
+	if (esw->mode == MLX5_ESWITCH_NONE)
 		return;
 
 	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -2154,6 +2221,16 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
 	mlx5_eswitch_clear_vf_vports_info(esw);
 }
 
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+{
+	if (!ESW_ALLOWED(esw))
+		return;
+
+	mutex_lock(&esw->mode_lock);
+	mlx5_eswitch_disable_locked(esw, clear_vf);
+	mutex_unlock(&esw->mode_lock);
+}
+
 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eswitch *esw;
@@ -2205,6 +2282,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	hash_init(esw->offloads.mod_hdr.hlist);
 	atomic64_set(&esw->offloads.num_flows, 0);
 	mutex_init(&esw->state_lock);
+	mutex_init(&esw->mode_lock);
 
 	mlx5_esw_for_all_vports(esw, i, vport) {
 		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
@@ -2239,6 +2317,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
 	esw_offloads_cleanup_reps(esw);
+	mutex_destroy(&esw->mode_lock);
 	mutex_destroy(&esw->state_lock);
 	mutex_destroy(&esw->offloads.mod_hdr.lock);
 	mutex_destroy(&esw->offloads.encap_tbl_lock);
@@ -2811,22 +2890,4 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
 		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
 }
 
-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
-{
-	const u32 *out;
-
-	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
-	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-		esw->esw_funcs.num_vfs = num_vfs;
-		return;
-	}
-
-	out = mlx5_esw_query_functions(esw->dev);
-	if (IS_ERR(out))
-		return;
-
-	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
-					  host_params_context.host_num_of_vfs);
-	kvfree(out);
-}
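
For readers skimming the eswitch hunks: the num_vfs convention documented in
mlx5_eswitch_enable_locked() above has three cases. A hypothetical caller
(illustrative only, not part of this commit) would exercise them like this:

    /* Illustrative (hypothetical) caller showing the num_vfs convention of
     * mlx5_eswitch_enable_locked(); the real callers in this series are
     * esw_offloads_start()/esw_offloads_stop() and the SR-IOV enable path.
     */
    static int example_mode_change(struct mlx5_eswitch *esw, int num_vfs)
    {
            int err;

            mutex_lock(&esw->mode_lock);

            /* SR-IOV path: num_vfs > 0 sets up that many VF vports;
             * num_vfs == 0 means no SR-IOV VFs (or an ECPF that is unaware
             * of the host PF's sriov state).
             */
            err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
            if (err)
                    goto out;

            mlx5_eswitch_disable_locked(esw, false);

            /* Pure mode change: leave esw->esw_funcs.num_vfs untouched. */
            err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
                                             MLX5_ESWITCH_IGNORE_NUM_VFS);
    out:
            mutex_unlock(&esw->mode_lock);
            return err;
    }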
@@ -258,6 +258,11 @@ struct mlx5_eswitch {
 	 */
 	struct mutex state_lock;
 
+	/* Protects eswitch mode change that occurs via one or more
+	 * user commands, i.e. sriov state change, devlink commands.
+	 */
+	struct mutex mode_lock;
+
 	struct {
 		bool enabled;
 		u32 root_tsar_id;
@@ -296,7 +301,11 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
+
+#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       u16 vport, u8 mac[ETH_ALEN]);
@@ -635,7 +644,6 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
 bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
 
 int
@@ -673,7 +681,7 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
+static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -682,14 +690,11 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 	return ERR_PTR(-EOPNOTSUPP);
 }
 
-static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
-
 static inline struct mlx5_flow_handle *
 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
 
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_ESWITCH_H__ */
@@ -1069,6 +1069,9 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
 	struct mlx5_flow_spec *spec;
 	void *misc;
 
+	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
@@ -1477,6 +1480,9 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 
+	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+		return;
+
 	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
 	mlx5_destroy_flow_group(offloads->restore_group);
 	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
@@ -1496,6 +1502,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
 	u32 *flow_group_in;
 	int err = 0;
 
+	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+		return 0;
+
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
 	if (!ns) {
 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
@@ -1557,6 +1566,8 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
 	esw->offloads.restore_group = g;
 	esw->offloads.restore_copy_hdr_id = mod_hdr;
 
+	kvfree(flow_group_in);
+
 	return 0;
 
 err_mod_hdr:
@@ -1581,13 +1592,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
 		return -EINVAL;
 	}
 
-	mlx5_eswitch_disable(esw, false);
-	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
-	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+	mlx5_eswitch_disable_locked(esw, false);
+	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
+					 esw->dev->priv.sriov.num_vfs);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Failed setting eswitch to offloads");
-		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
+		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
+						  MLX5_ESWITCH_IGNORE_NUM_VFS);
 		if (err1) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Failed setting eswitch back to legacy");
@@ -2342,14 +2354,15 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	mutex_init(&esw->offloads.termtbl_mutex);
 	mlx5_rdma_enable_roce(esw->dev);
 
-	err = esw_offloads_steering_init(esw);
-	if (err)
-		goto err_steering_init;
-
 	err = esw_set_passing_vport_metadata(esw, true);
 	if (err)
 		goto err_vport_metadata;
 
+	err = esw_offloads_steering_init(esw);
+	if (err)
+		goto err_steering_init;
+
 	/* Representor will control the vport link state */
 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
@@ -2371,9 +2384,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 err_uplink:
 	esw_set_passing_vport_metadata(esw, false);
-err_vport_metadata:
-	esw_offloads_steering_cleanup(esw);
 err_steering_init:
+	esw_offloads_steering_cleanup(esw);
+err_vport_metadata:
 	mlx5_rdma_disable_roce(esw->dev);
 	mutex_destroy(&esw->offloads.termtbl_mutex);
 	return err;
@@ -2384,11 +2397,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 {
 	int err, err1;
 
-	mlx5_eswitch_disable(esw, false);
-	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
+	mlx5_eswitch_disable_locked(esw, false);
+	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
+					 MLX5_ESWITCH_IGNORE_NUM_VFS);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
-		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
+						  MLX5_ESWITCH_IGNORE_NUM_VFS);
 		if (err1) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Failed setting eswitch back to offloads");
@@ -2494,17 +2509,23 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
 	if(!MLX5_ESWITCH_MANAGER(dev))
 		return -EPERM;
 
-	if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
-	    !mlx5_core_is_ecpf_esw_manager(dev))
-		return -EOPNOTSUPP;
-
 	return 0;
 }
 
+static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
+{
+	/* devlink commands in NONE eswitch mode are currently supported only
+	 * on ECPF.
+	 */
+	return (esw->mode == MLX5_ESWITCH_NONE &&
+		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
 				  struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	u16 cur_mlx5_mode, mlx5_mode = 0;
 	int err;
@@ -2512,32 +2533,50 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (err)
 		return err;
 
-	cur_mlx5_mode = dev->priv.eswitch->mode;
-
 	if (esw_mode_from_devlink(mode, &mlx5_mode))
 		return -EINVAL;
 
+	mutex_lock(&esw->mode_lock);
+	err = eswitch_devlink_esw_mode_check(esw);
+	if (err)
+		goto unlock;
+
+	cur_mlx5_mode = esw->mode;
+
 	if (cur_mlx5_mode == mlx5_mode)
-		return 0;
+		goto unlock;
 
 	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
-		return esw_offloads_start(dev->priv.eswitch, extack);
+		err = esw_offloads_start(esw, extack);
 	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
-		return esw_offloads_stop(dev->priv.eswitch, extack);
+		err = esw_offloads_stop(esw, extack);
 	else
-		return -EINVAL;
+		err = -EINVAL;
+
+unlock:
+	mutex_unlock(&esw->mode_lock);
+	return err;
 }
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int err;
 
 	err = mlx5_eswitch_check(dev);
 	if (err)
 		return err;
 
-	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
+	mutex_lock(&esw->mode_lock);
+	err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
+	if (err)
+		goto unlock;
+
+	err = esw_mode_to_devlink(esw->mode, mode);
+unlock:
+	mutex_unlock(&esw->mode_lock);
+	return err;
 }
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
@@ -2552,14 +2591,20 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 	if (err)
 		return err;
 
+	mutex_lock(&esw->mode_lock);
+	err = eswitch_devlink_esw_mode_check(esw);
+	if (err)
+		goto out;
+
 	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
 	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
 		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
-			return 0;
+			goto out;
 		/* fall through */
 	case MLX5_CAP_INLINE_MODE_L2:
 		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto out;
 	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
 		break;
 	}
@@ -2567,7 +2612,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 	if (atomic64_read(&esw->offloads.num_flows) > 0) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Can't set inline mode when flows are configured");
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto out;
 	}
 
 	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
@@ -2584,6 +2630,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 	}
 
 	esw->offloads.inline_mode = mlx5_mode;
+	mutex_unlock(&esw->mode_lock);
 	return 0;
 
 revert_inline_mode:
@@ -2593,6 +2640,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
 						 vport,
 						 esw->offloads.inline_mode);
 out:
+	mutex_unlock(&esw->mode_lock);
 	return err;
 }
@@ -2606,7 +2654,15 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (err)
 		return err;
 
-	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+	mutex_lock(&esw->mode_lock);
+	err = eswitch_devlink_esw_mode_check(esw);
+	if (err)
+		goto unlock;
+
+	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+unlock:
+	mutex_unlock(&esw->mode_lock);
+	return err;
 }
 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
@@ -2621,26 +2677,36 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
 	if (err)
 		return err;
 
+	mutex_lock(&esw->mode_lock);
+	err = eswitch_devlink_esw_mode_check(esw);
+	if (err)
+		goto unlock;
+
 	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
 	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
-	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
-		return -EOPNOTSUPP;
+	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
+		err = -EOPNOTSUPP;
+		goto unlock;
+	}
 
-	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
-		return -EOPNOTSUPP;
+	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
+		err = -EOPNOTSUPP;
+		goto unlock;
+	}
 
 	if (esw->mode == MLX5_ESWITCH_LEGACY) {
 		esw->offloads.encap = encap;
-		return 0;
+		goto unlock;
 	}
 
 	if (esw->offloads.encap == encap)
-		return 0;
+		goto unlock;
 
 	if (atomic64_read(&esw->offloads.num_flows) > 0) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Can't set encapsulation when flows are configured");
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto unlock;
 	}
 
 	esw_destroy_offloads_fdb_tables(esw);
@@ -2656,6 +2722,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
 		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
 	}
 
+unlock:
+	mutex_unlock(&esw->mode_lock);
 	return err;
 }
@@ -2670,7 +2738,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
 	if (err)
 		return err;
 
+	mutex_lock(&esw->mode_lock);
+	err = eswitch_devlink_esw_mode_check(esw);
+	if (err)
+		goto unlock;
+
 	*encap = esw->offloads.encap;
+unlock:
+	mutex_unlock(&esw->mode_lock);
 	return 0;
 }
...
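
The devlink eswitch callbacks above (mode, inline-mode, and encap get/set) all
converge on the same shape after this series. A condensed template
(illustrative only; the operation body is elided, the real callbacks are in
the hunks above):

    /* Illustrative template of the devlink eswitch get/set callbacks. */
    static int devlink_eswitch_op_template(struct devlink *devlink)
    {
            struct mlx5_core_dev *dev = devlink_priv(devlink);
            struct mlx5_eswitch *esw = dev->priv.eswitch;
            int err;

            err = mlx5_eswitch_check(dev);
            if (err)
                    return err;

            mutex_lock(&esw->mode_lock);
            err = eswitch_devlink_esw_mode_check(esw);
            if (err)
                    goto unlock;

            /* ... perform the get/set while the mode cannot change ... */

    unlock:
            mutex_unlock(&esw->mode_lock);
            return err;
    }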
@@ -280,7 +280,8 @@ create_fdb_chain_restore(struct fdb_chain *fdb_chain)
 	u32 index;
 	int err;
 
-	if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw))
+	if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) ||
+	    !mlx5_esw_chains_prios_supported(esw))
 		return 0;
 
 	err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
@@ -335,6 +336,18 @@ create_fdb_chain_restore(struct fdb_chain *fdb_chain)
 	return err;
 }
 
+static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain)
+{
+	struct mlx5_eswitch *esw = fdb_chain->esw;
+
+	if (!fdb_chain->miss_modify_hdr)
+		return;
+
+	mlx5_del_flow_rules(fdb_chain->restore_rule);
+	mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
+	mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
+}
+
 static struct fdb_chain *
 mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
 {
@@ -361,11 +374,7 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
 	return fdb_chain;
 
 err_insert:
-	if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
-		mlx5_del_flow_rules(fdb_chain->restore_rule);
-		mlx5_modify_header_dealloc(esw->dev,
-					   fdb_chain->miss_modify_hdr);
-	}
+	destroy_fdb_chain_restore(fdb_chain);
 err_restore:
 	kvfree(fdb_chain);
 	return ERR_PTR(err);
@@ -379,14 +388,7 @@ mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
 	rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
 			       chain_params);
 
-	if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
-		mlx5_del_flow_rules(fdb_chain->restore_rule);
-		mlx5_modify_header_dealloc(esw->dev,
-					   fdb_chain->miss_modify_hdr);
-		mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
-	}
-
+	destroy_fdb_chain_restore(fdb_chain);
 	kvfree(fdb_chain);
 }
 
@@ -423,7 +425,7 @@ mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
 	dest.ft = next_fdb;
 
 	if (next_fdb == tc_end_fdb(esw) &&
-	    fdb_modify_header_fwd_to_table_supported(esw)) {
+	    mlx5_esw_chains_prios_supported(esw)) {
 		act.modify_hdr = fdb_chain->miss_modify_hdr;
 		act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 	}
@@ -728,7 +730,8 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
 struct mlx5_flow_table *
 mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw)
 {
-	int chain, prio, level, err;
+	u32 chain, prio, level;
+	int err;
 
 	if (!fdb_ignore_flow_level_supported(esw)) {
 		err = -EOPNOTSUPP;
@@ -783,6 +786,9 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
 		   esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
 		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
 		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
 	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
 		/* Disabled when ttl workaround is needed, e.g
 		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
...
@@ -1322,7 +1322,7 @@ add_rule_fte(struct fs_fte *fte,
 	fte->node.active = true;
 	fte->status |= FS_FTE_STATUS_EXISTING;
-	atomic_inc(&fte->node.version);
+	atomic_inc(&fg->node.version);
 
 out:
 	return handle;
@@ -1577,28 +1577,19 @@ struct match_list {
 	struct mlx5_flow_group *g;
 };
 
-struct match_list_head {
-	struct list_head  list;
-	struct match_list first;
-};
-
-static void free_match_list(struct match_list_head *head, bool ft_locked)
+static void free_match_list(struct match_list *head, bool ft_locked)
 {
-	if (!list_empty(&head->list)) {
-		struct match_list *iter, *match_tmp;
-
-		list_del(&head->first.list);
-		tree_put_node(&head->first.g->node, ft_locked);
-		list_for_each_entry_safe(iter, match_tmp, &head->list,
-					 list) {
-			tree_put_node(&iter->g->node, ft_locked);
-			list_del(&iter->list);
-			kfree(iter);
-		}
-	}
+	struct match_list *iter, *match_tmp;
+
+	list_for_each_entry_safe(iter, match_tmp, &head->list,
+				 list) {
+		tree_put_node(&iter->g->node, ft_locked);
+		list_del(&iter->list);
+		kfree(iter);
+	}
 }
 
-static int build_match_list(struct match_list_head *match_head,
+static int build_match_list(struct match_list *match_head,
 			    struct mlx5_flow_table *ft,
 			    const struct mlx5_flow_spec *spec,
 			    bool ft_locked)
@@ -1615,14 +1606,8 @@ static int build_match_list(struct match_list_head *match_head,
 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
 		struct match_list *curr_match;
 
-		if (likely(list_empty(&match_head->list))) {
-			if (!tree_get_node(&g->node))
-				continue;
-			match_head->first.g = g;
-			list_add_tail(&match_head->first.list,
-				      &match_head->list);
-			continue;
-		}
+		if (unlikely(!tree_get_node(&g->node)))
+			continue;
 
 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
 		if (!curr_match) {
@@ -1630,10 +1615,6 @@ static int build_match_list(struct match_list_head *match_head,
 			err = -ENOMEM;
 			goto out;
 		}
-		if (!tree_get_node(&g->node)) {
-			kfree(curr_match);
-			continue;
-		}
 		curr_match->g = g;
 		list_add_tail(&curr_match->list, &match_head->list);
 	}
@@ -1699,7 +1680,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 	struct match_list *iter;
 	bool take_write = false;
 	struct fs_fte *fte;
-	u64 version;
+	u64 version = 0;
 	int err;
 
 	fte = alloc_fte(ft, spec, flow_act);
@@ -1707,10 +1688,12 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		return ERR_PTR(-ENOMEM);
 
 search_again_locked:
-	version = matched_fgs_get_version(match_head);
 	if (flow_act->flags & FLOW_ACT_NO_APPEND)
 		goto skip_search;
-	/* Try to find a fg that already contains a matching fte */
+	version = matched_fgs_get_version(match_head);
+	/* Try to find an fte with identical match value and attempt to update
+	 * its action.
+	 */
 	list_for_each_entry(iter, match_head, list) {
 		struct fs_fte *fte_tmp;
 
@@ -1738,10 +1721,12 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		goto out;
 	}
 
-	/* Check the fgs version, for case the new FTE with the
-	 * same values was added while the fgs weren't locked
+	/* Check the fgs version. If the version has changed, it could be that
+	 * an FTE with the same match value was added while the fgs weren't
+	 * locked.
 	 */
-	if (version != matched_fgs_get_version(match_head)) {
+	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
+	    version != matched_fgs_get_version(match_head)) {
 		take_write = true;
 		goto search_again_locked;
 	}
@@ -1785,9 +1770,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 {
 	struct mlx5_flow_steering *steering = get_steering(&ft->node);
-	struct mlx5_flow_group *g;
 	struct mlx5_flow_handle *rule;
-	struct match_list_head match_head;
+	struct match_list match_head;
+	struct mlx5_flow_group *g;
 	bool take_write = false;
 	struct fs_fte *fte;
 	int version;
...
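
The try_add_to_existing_fg() hunks above tighten the optimistic search: the
matched-groups version snapshot is now taken, and re-checked, only when
FLOW_ACT_NO_APPEND is clear, since a NO_APPEND rule never searches for an
existing FTE. A condensed control-flow sketch (illustrative only; allocation,
locking, and FTE setup are elided, and the helper name is hypothetical):

    /* Illustrative sketch of the try_add_to_existing_fg() retry loop
     * after this change; not the full function.
     */
    static struct mlx5_flow_handle *
    version_retry_sketch(struct list_head *match_head,
                         struct mlx5_flow_act *flow_act)
    {
            bool no_append = flow_act->flags & FLOW_ACT_NO_APPEND;
            u64 version = 0;

    search_again_locked:
            if (!no_append) {
                    version = matched_fgs_get_version(match_head);
                    /* Walk the matched groups looking for an fte with an
                     * identical match value; on a hit, update its action
                     * and return the handle.
                     */
            }

            /* No hit: insert a new fte under the write lock. If the groups'
             * version moved meanwhile, a concurrent insert may have added
             * the same match value, so search again.
             */
            if (!no_append && version != matched_fgs_get_version(match_head))
                    goto search_again_locked;

            return NULL; /* placeholder for the newly added rule */
    }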
@@ -1211,15 +1211,10 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 			goto err_devlink_reg;
 	}
 
-	if (mlx5_device_registered(dev)) {
+	if (mlx5_device_registered(dev))
 		mlx5_attach_device(dev);
-	} else {
-		err = mlx5_register_device(dev);
-		if (err) {
-			mlx5_core_err(dev, "register device failed %d\n", err);
-			goto err_reg_dev;
-		}
-	}
+	else
+		mlx5_register_device(dev);
 
 	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 out:
@@ -1227,9 +1222,6 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 	return err;
 
-err_reg_dev:
-	if (boot)
-		mlx5_devlink_unregister(priv_to_devlink(dev));
-
 err_devlink_reg:
 	mlx5_unload(dev);
 err_load:
@@ -1243,7 +1235,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 	return err;
 }
 
-int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
 	if (cleanup) {
 		mlx5_unregister_device(dev);
@@ -1272,7 +1264,6 @@ int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 	mlx5_function_teardown(dev, cleanup);
 out:
 	mutex_unlock(&dev->intf_state_mutex);
-	return 0;
 }
 
 static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
@@ -1393,12 +1384,7 @@ static void remove_one(struct pci_dev *pdev)
 	mlx5_crdump_disable(dev);
 	mlx5_devlink_unregister(devlink);
 
-	if (mlx5_unload_one(dev, true)) {
-		mlx5_core_err(dev, "mlx5_unload_one failed\n");
-		mlx5_health_flush(dev);
-		return;
-	}
-
+	mlx5_unload_one(dev, true);
 	mlx5_pci_close(dev);
 	mlx5_mdev_uninit(dev);
 	mlx5_devlink_free(devlink);
...
@@ -182,7 +182,7 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
 void mlx5_attach_device(struct mlx5_core_dev *dev);
 void mlx5_detach_device(struct mlx5_core_dev *dev);
 bool mlx5_device_registered(struct mlx5_core_dev *dev);
-int mlx5_register_device(struct mlx5_core_dev *dev);
+void mlx5_register_device(struct mlx5_core_dev *dev);
 void mlx5_unregister_device(struct mlx5_core_dev *dev);
 void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
 void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
@@ -244,6 +244,6 @@ enum {
 u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
 void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
 
-int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
 int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
 
 #endif /* __MLX5_CORE_H__ */
@@ -77,8 +77,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 	if (!MLX5_ESWITCH_MANAGER(dev))
 		goto enable_vfs_hca;
 
-	mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
-	err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
+	err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
 	if (err) {
 		mlx5_core_warn(dev,
 			       "failed to enable eswitch SRIOV (%d)\n", err);
...
@@ -6187,7 +6187,8 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 		.doit = devlink_nl_cmd_eswitch_get_doit,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NO_LOCK,
 	},
 	{
 		.cmd = DEVLINK_CMD_ESWITCH_SET,
...