Commit 47dd7e60 authored by Parav Pandit's avatar Parav Pandit Committed by Saeed Mahameed

net/mlx5: E-Switch, Use xarray for vport number to vport and rep mapping

Currently vport number to vport and its representor are mapped using an
array and an index.

Vport numbers of different types of functions are not contiguous. Adding
a new such discontiguous range using index-and-number mapping is
increasingly complex and hard to maintain.

Hence, maintain an xarray of vport and rep whose lookup is done based on
the vport number.
Each VF and SF entry is marked with an xarray mark to identify the
function type. Additionally, PF and VF need special handling for legacy
inline mode; they are therefore also marked as host functions using the
HOST_FN mark.
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 9f8c7100
......@@ -96,7 +96,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
}
if (!vport->egress.acl) {
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
table_size);
if (IS_ERR(vport->egress.acl)) {
......
......@@ -148,7 +148,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
esw_acl_egress_vlan_grp_destroy(vport);
}
static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
static bool esw_acl_egress_needed(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}
......@@ -171,7 +171,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
table_size++;
if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
table_size++;
vport->egress.acl = esw_acl_table_create(esw, vport->vport,
vport->egress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
if (IS_ERR(vport->egress.acl)) {
err = PTR_ERR(vport->egress.acl);
......
......@@ -6,14 +6,14 @@
#include "helper.h"
struct mlx5_flow_table *
esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
int acl_supported;
int vport_index;
u16 vport_num;
int err;
acl_supported = (ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS) ?
......@@ -23,11 +23,11 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
if (!acl_supported)
return ERR_PTR(-EOPNOTSUPP);
vport_num = vport->vport;
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
vport_index = mlx5_eswitch_vport_num_to_index(esw, vport_num);
root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport_index);
root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
......
......@@ -8,7 +8,7 @@
/* General acl helper functions */
struct mlx5_flow_table *
esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size);
esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size);
/* Egress acl helper functions */
void esw_acl_egress_table_destroy(struct mlx5_vport *vport);
......
......@@ -177,7 +177,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
}
if (!vport->ingress.acl) {
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
table_size);
if (IS_ERR(vport->ingress.acl)) {
......
......@@ -7,7 +7,7 @@
#include "ofld.h"
static bool
esw_acl_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
{
return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
......@@ -255,7 +255,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
vport->ingress.acl = esw_acl_table_create(esw, vport->vport,
vport->ingress.acl = esw_acl_table_create(esw, vport,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
num_ftes);
if (IS_ERR(vport->ingress.acl)) {
......
......@@ -14,8 +14,7 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
static bool
mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_UPLINK ||
(mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
......
......@@ -216,7 +216,8 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
int esw_legacy_enable(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
int ret, i;
unsigned long i;
int ret;
ret = esw_create_legacy_table(esw);
if (ret)
......
......@@ -88,20 +88,17 @@ struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 idx;
struct mlx5_vport *vport;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return ERR_PTR(-EPERM);
idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
if (idx > esw->total_vports - 1) {
esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
vport_num, idx);
vport = xa_load(&esw->vports, vport_num);
if (!vport) {
esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
return ERR_PTR(-EINVAL);
}
return &esw->vports[idx];
return vport;
}
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
......@@ -345,9 +342,10 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
{
u8 *mac = vaddr->node.addr;
struct mlx5_vport *vport;
u16 i, vport_num;
unsigned long i;
u16 vport_num;
mlx5_esw_for_all_vports(esw, i, vport) {
mlx5_esw_for_each_vport(esw, i, vport) {
struct hlist_head *vport_hash = vport->mc_list;
struct vport_addr *iter_vaddr =
l2addr_hash_find(vport_hash,
......@@ -1175,7 +1173,7 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
int i;
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
memset(&vport->qos, 0, sizeof(vport->qos));
......@@ -1213,20 +1211,25 @@ void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
int i;
struct mlx5_vport *vport;
unsigned long i;
mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
mlx5_eswitch_unload_vport(esw, i);
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
if (!vport->enabled)
continue;
mlx5_eswitch_unload_vport(esw, vport->vport);
}
}
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
unsigned long i;
int err;
int i;
mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
err = mlx5_eswitch_load_vport(esw, i, enabled_events);
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
if (err)
goto vf_err;
}
......@@ -1234,7 +1237,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
return 0;
vf_err:
mlx5_eswitch_unload_vf_vports(esw, i - 1);
mlx5_eswitch_unload_vf_vports(esw, num_vfs);
return err;
}
......@@ -1563,24 +1566,106 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
up_write(&esw->mode_lock);
}
/* Allocate one vport object for @vport_num, record its linear @index and
 * insert it into the esw->vports xarray keyed by vport number.
 * On success esw->total_vports is incremented; returns 0, or a negative
 * errno (-ENOMEM, or xa_insert() failure) with nothing left inserted.
 */
static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
int index, u16 vport_num)
{
struct mlx5_vport *vport;
int err;
vport = kzalloc(sizeof(*vport), GFP_KERNEL);
if (!vport)
return -ENOMEM;
/* NOTE(review): @dev is unused here; vport->dev is taken from esw->dev. */
vport->dev = esw->dev;
vport->vport = vport_num;
vport->index = index;
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
/* Keyed by vport number; xa_insert() fails if an entry already exists. */
err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
if (err)
goto insert_err;
esw->total_vports++;
return 0;
insert_err:
kfree(vport);
return err;
}
/* Remove @vport from the esw->vports xarray, then free it. Erasing before
 * freeing ensures no lookup can return the vport once it is released.
 */
static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
xa_erase(&esw->vports, vport->vport);
kfree(vport);
}
/* Free every allocated vport entry and tear down the vports xarray.
 * Used both on the mlx5_esw_vports_init() error path and at full
 * eswitch cleanup.
 * NOTE(review): esw->total_vports is not reset here — confirm callers
 * do not reuse the eswitch without re-running init.
 */
static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
mlx5_esw_for_each_vport(esw, i, vport)
mlx5_esw_vport_free(esw, vport);
xa_destroy(&esw->vports);
}
/* Populate esw->vports with one entry per eswitch function: PF, VFs,
 * SFs, ECPF (when it exists) and the uplink. Entries are keyed by vport
 * number and tagged with xarray marks for their function type (VF/SF)
 * and, for PF/VF host functions, the HOST_FN mark used by legacy
 * inline-mode handling. Returns 0 or a negative errno; on failure any
 * already-inserted vports are freed.
 */
static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
u16 base_sf_num;
int idx = 0;
int err;
int i;
xa_init(&esw->vports);
/* PF first: vport number MLX5_VPORT_PF at index 0. */
err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
if (err)
goto err;
if (esw->first_host_vport == MLX5_VPORT_PF)
xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
/* VF vport numbers are contiguous and directly follow the PF, so the
 * running index doubles as the VF's vport number in this loop.
 */
for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
if (err)
goto err;
xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
}
/* SF vport numbers start at a device-reported base disjoint from the
 * VF range; marks must therefore be keyed by base_sf_num + i, not idx.
 */
base_sf_num = mlx5_sf_start_function_id(dev);
for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
if (err)
goto err;
xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
idx++;
}
if (mlx5_ecpf_vport_exists(dev)) {
err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
if (err)
goto err;
idx++;
}
/* Uplink is always the last entry created. */
err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
if (err)
goto err;
return 0;
err:
mlx5_esw_vports_cleanup(esw);
return err;
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
int total_vports;
int err, i;
int err;
if (!MLX5_VPORT_MANAGER(dev))
return 0;
total_vports = MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) +
mlx5_sf_max_functions(dev);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
total_vports,
MLX5_MAX_UC_PER_VPORT(dev),
MLX5_MAX_MC_PER_VPORT(dev));
esw = kzalloc(sizeof(*esw), GFP_KERNEL);
if (!esw)
return -ENOMEM;
......@@ -1595,18 +1680,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
GFP_KERNEL);
if (!esw->vports) {
err = -ENOMEM;
err = mlx5_esw_vports_init(esw);
if (err)
goto abort;
}
esw->total_vports = total_vports;
err = esw_offloads_init_reps(esw);
if (err)
goto abort;
goto reps_err;
mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
......@@ -1619,25 +1699,25 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
mutex_init(&esw->state_lock);
init_rwsem(&esw->mode_lock);
mlx5_esw_for_all_vports(esw, i, vport) {
vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
}
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports,
MLX5_MAX_UC_PER_VPORT(dev),
MLX5_MAX_MC_PER_VPORT(dev));
return 0;
reps_err:
mlx5_esw_vports_cleanup(esw);
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
kfree(esw->vports);
kfree(esw);
return err;
}
......@@ -1659,7 +1739,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup_reps(esw);
kfree(esw->vports);
mlx5_esw_vports_cleanup(esw);
kfree(esw);
}
......@@ -1718,8 +1798,29 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
/* Return true if @vport_num is a valid vport whose xarray entry carries
 * @mark (one of the MLX5_ESW_VPT_* function-type marks), false otherwise.
 */
static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
struct mlx5_vport *vport;
/* Also validates esw and vport_num; unknown vports report false. */
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return false;
return xa_get_mark(&esw->vports, vport_num, mark);
}
/* True if @vport_num belongs to a SR-IOV VF (MLX5_ESW_VPT_VF mark). */
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}
/* True if @vport_num belongs to a sub-function (MLX5_ESW_VPT_SF mark). */
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
static bool
is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_PF ||
mlx5_eswitch_is_vf_vport(esw, vport_num) ||
......@@ -1891,9 +1992,9 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
struct mlx5_vport *evport;
u32 max_guarantee = 0;
int i;
unsigned long i;
mlx5_esw_for_all_vports(esw, i, evport) {
mlx5_esw_for_each_vport(esw, i, evport) {
if (!evport->enabled || evport->qos.min_rate < max_guarantee)
continue;
max_guarantee = evport->qos.min_rate;
......@@ -1911,11 +2012,11 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
unsigned long i;
u32 bw_share;
int err;
int i;
mlx5_esw_for_all_vports(esw, i, evport) {
mlx5_esw_for_each_vport(esw, i, evport) {
if (!evport->enabled)
continue;
vport_min_rate = evport->qos.min_rate;
......
......@@ -176,6 +176,7 @@ struct mlx5_vport {
u16 vport;
bool enabled;
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
};
......@@ -228,7 +229,7 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
struct xarray vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
struct mutex encap_tbl_lock; /* protects encap_tbl */
......@@ -278,7 +279,7 @@ struct mlx5_eswitch {
struct esw_mc_addr mc_promisc;
/* end of legacy */
struct workqueue_struct *work_queue;
struct mlx5_vport *vports;
struct xarray vports;
u32 flags;
int total_vports;
int enabled_vports;
......@@ -545,102 +546,11 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
#define MLX5_VPORT_PF_PLACEHOLDER (1u)
#define MLX5_VPORT_UPLINK_PLACEHOLDER (1u)
#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev))
#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \
MLX5_VPORT_UPLINK_PLACEHOLDER + \
MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
{
/* PF and VF vports indices start from 0 to max_vfs */
return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}
static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
{
return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
}
static inline int
mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num - mlx5_sf_start_function_id(esw->dev) +
MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}
static inline u16
mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
{
return mlx5_sf_start_function_id(esw->dev) + idx -
(MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
}
static inline bool
mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_sf_supported(esw->dev) &&
vport_num >= mlx5_sf_start_function_id(esw->dev) &&
(vport_num < (mlx5_sf_start_function_id(esw->dev) +
mlx5_sf_max_functions(esw->dev)));
}
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev);
}
static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
/* Uplink always locate at the last element of the array.*/
return esw->total_vports - 1;
}
static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
return esw->total_vports - 2;
}
static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
u16 vport_num)
{
if (vport_num == MLX5_VPORT_ECPF) {
if (!mlx5_ecpf_vport_exists(esw->dev))
esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
return mlx5_eswitch_ecpf_idx(esw);
}
if (vport_num == MLX5_VPORT_UPLINK)
return mlx5_eswitch_uplink_idx(esw);
if (mlx5_esw_is_sf_vport(esw, vport_num))
return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
/* PF and VF vports start from 0 to max_vfs */
return vport_num;
}
static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
int index)
{
if (index == mlx5_eswitch_ecpf_idx(esw) &&
mlx5_ecpf_vport_exists(esw->dev))
return MLX5_VPORT_ECPF;
if (index == mlx5_eswitch_uplink_idx(esw))
return MLX5_VPORT_UPLINK;
/* SF vports indices are after VFs and before ECPF */
if (mlx5_sf_supported(esw->dev) &&
index > mlx5_core_max_vfs(esw->dev))
return mlx5_esw_sf_vport_index_to_num(esw, index);
/* PF and VF vports start from 0 to max_vfs */
return index;
}
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
u16 vport_num)
......@@ -657,82 +567,42 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
/* The vport getter/iterator are only valid after esw->total_vports
* and vport->vport are initialized in mlx5_eswitch_init.
/* Each mark identifies eswitch vport type.
* MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
* a single mark.
* MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
* MLX5_ESW_VPT_SF identifies SF vport.
*/
#define mlx5_esw_for_all_vports(esw, i, vport) \
for ((i) = MLX5_VPORT_PF; \
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
for ((i) = (esw)->total_vports - 1; \
(vport) = &(esw)->vports[i], \
(i) >= MLX5_VPORT_PF; (i)--)
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
for ((i) = (nvfs); \
(vport) = &(esw)->vports[(i)], \
(i) >= MLX5_VPORT_FIRST_VF; (i)--)
/* The rep getter/iterator are only valid after esw->total_vports
* and vport->vport are initialized in mlx5_eswitch_init.
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2
/* The vport iterator is valid only after vport are initialized in mlx5_eswitch_init.
* Borrowed the idea from xa_for_each_marked() but with support for desired last element.
*/
#define mlx5_esw_for_all_reps(esw, i, rep) \
for ((i) = MLX5_VPORT_PF; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
for ((i) = (nvfs); \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) >= MLX5_VPORT_FIRST_VF; (i)--)
#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
for ((i) = (esw)->first_host_vport; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
for ((i) = (nvfs); \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) >= (esw)->first_host_vport; (i)--)
#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
for ((vport) = (esw)->first_host_vport; \
(vport) <= (nvfs); (vport)++)
#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); \
(vport) >= (esw)->first_host_vport; (vport)--)
#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
for ((i) = mlx5_esw_sf_start_idx(esw); \
(rep) = &(esw)->offloads.vport_reps[(i)], \
(i) < mlx5_esw_sf_end_idx(esw); (i++))
#define mlx5_esw_for_each_vport(esw, index, vport) \
xa_for_each(&((esw)->vports), index, vport)
#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
for (index = 0, entry = xa_find(xa, &index, last, filter); \
entry; entry = xa_find_after(xa, &index, last, filter))
#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)
#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)
#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment