Commit c2d7712c authored by Bodong Wang, committed by Saeed Mahameed

net/mlx5: E-Switch, Introduce per vport configuration for eswitch modes

Both legacy and offload modes require vport setup; only offload mode
requires rep setup. Before this patch, vport and rep operations were
applied separately to all relevant vports in different stages.

Change to use per vport configuration, so that vport and rep operations
are modularized per vport.
Signed-off-by: Bodong Wang <bodong@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent d7c92cb5
......@@ -1806,12 +1806,14 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
esw_vport_cleanup_acl(esw, vport);
}
static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
struct mlx5_vport *vport;
int ret;
vport = mlx5_eswitch_get_vport(esw, vport_num);
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
......@@ -1841,10 +1843,11 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
return ret;
}
static void esw_disable_vport(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 vport_num = vport->vport;
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
mutex_lock(&esw->state_lock);
if (!vport->enabled)
......@@ -1950,6 +1953,32 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
{
int err;
err = esw_enable_vport(esw, vport_num, enabled_events);
if (err)
return err;
err = esw_offloads_load_rep(esw, vport_num);
if (err)
goto err_rep;
return err;
err_rep:
esw_disable_vport(esw, vport_num);
return err;
}
static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
esw_disable_vport(esw, vport_num);
}
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
......@@ -1957,28 +1986,25 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
int num_vfs;
int ret;
int i;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
ret = esw_enable_vport(esw, vport, enabled_events);
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
if (ret)
return ret;
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
ret = esw_enable_vport(esw, vport, enabled_events);
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
}
/* Enable VF vports */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
ret = esw_enable_vport(esw, vport, enabled_events);
mlx5_esw_for_each_vf_vport_num(esw, i, esw->esw_funcs.num_vfs) {
ret = mlx5_eswitch_load_vport(esw, i, enabled_events);
if (ret)
goto vf_err;
}
......@@ -1986,17 +2012,14 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
vf_err:
num_vfs = i - 1;
mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
esw_disable_vport(esw, vport);
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
mlx5_eswitch_unload_vport(esw, i);
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
esw_disable_vport(esw, vport);
}
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
esw_disable_vport(esw, vport);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
return ret;
}
......@@ -2005,11 +2028,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
int i;
mlx5_esw_for_all_vports_reverse(esw, i, vport)
esw_disable_vport(esw, vport);
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, esw->esw_funcs.num_vfs)
mlx5_eswitch_unload_vport(esw, i);
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
......
......@@ -651,6 +651,9 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
......
......@@ -1678,14 +1678,6 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__unload_reps_special_vport(esw, rep_type);
}
static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
__unload_reps_all_vport(esw, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
......@@ -1702,44 +1694,6 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
return err;
}
static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int err;
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
return err;
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto err_pf;
}
if (mlx5_ecpf_vport_exists(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
err = __esw_offloads_load_rep(esw, rep, rep_type);
if (err)
goto err_ecpf;
}
return 0;
err_ecpf:
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
err_pf:
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
__esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
{
......@@ -1759,26 +1713,6 @@ static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
return err;
}
static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
int err;
/* Special vports must be loaded first, uplink rep creates mdev resource. */
err = __load_reps_special_vport(esw, rep_type);
if (err)
return err;
err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
if (err)
goto err_vfs;
return 0;
err_vfs:
__unload_reps_special_vport(esw, rep_type);
return err;
}
static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = 0;
......@@ -1798,25 +1732,46 @@ static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
return err;
}
static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
u8 rep_type = 0;
struct mlx5_eswitch_rep *rep;
int rep_type;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
err = __load_reps_all_vport(esw, rep_type);
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
if (err)
goto err_reps;
}
return err;
return 0;
err_reps:
while (rep_type-- > 0)
__unload_reps_all_vport(esw, rep_type);
atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
for (--rep_type; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
......@@ -2466,22 +2421,23 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
/* Uplink vport rep must load first. */
err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
if (err)
goto err_vports;
goto err_uplink;
err = esw_offloads_load_all_reps(esw);
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_reps;
goto err_vports;
esw_offloads_devcom_init(esw);
mutex_init(&esw->offloads.termtbl_mutex);
return 0;
err_reps:
mlx5_eswitch_disable_pf_vf_vports(esw);
err_vports:
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
......@@ -2512,8 +2468,8 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mlx5_rdma_disable_roce(esw->dev);
......@@ -2786,6 +2742,21 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
return 0;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
/* Currently, only ECPF based device has representor for host PF. */
if (vport_num == MLX5_VPORT_PF &&
!mlx5_core_is_ecpf_esw_manager(esw->dev))
return false;
if (vport_num == MLX5_VPORT_ECPF &&
!mlx5_ecpf_vport_exists(esw->dev))
return false;
return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type)
......@@ -2796,9 +2767,11 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_all_reps(esw, i, rep) {
if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
rep_data = &rep->rep_data[rep_type];
atomic_set(&rep_data->state, REP_REGISTERED);
}
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment