Commit 4c7f4028 authored by Chris Mi's avatar Chris Mi Committed by Saeed Mahameed

net/mlx5: E-switch, Move vport table functions to a new file

Currently, the vport table functions are in common eswitch offload
file. This file is too big. Move the vport table create, delete and
lookup functions to a separate file. Put the file in esw directory.

Pre-step for generalizing its functionality for serving both the
mirroring and the sample features.
Signed-off-by: Chris Mi <cmi@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 0b35e0de
......@@ -53,7 +53,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
esw/devlink_port.o
esw/devlink_port.o esw/vporttbl.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021 Mellanox Technologies.
#include "eswitch.h"
/* Sizing of each per-vport FDB table: max flow entries and autogroups. */
#define MLX5_ESW_VPORT_TABLE_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

/* One refcounted per-vport FDB table, kept in the
 * esw->fdb_table.offloads.vports hash table.
 */
struct mlx5_vport_table {
	struct hlist_node hlist;	/* membership in the vports hash table */
	struct mlx5_flow_table *fdb;	/* the per-vport flow table itself */
	u32 num_rules;			/* get/put reference count */
	struct mlx5_vport_key key;	/* full key, compared on hash collision */
};
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *fdb;
ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
ft_attr.prio = FDB_PER_VPORT;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
PTR_ERR(fdb));
}
return fdb;
}
/* Fill the packed hash key @key from @attr plus the device's vhca_id and
 * return the jhash of its raw bytes (stable because the struct is __packed).
 */
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_vport_tbl_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vport = attr->vport;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);

	return jhash(key, sizeof(*key), 0);
}
/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	/* Walk only the bucket selected by the precomputed jhash @key and
	 * compare full keys to resolve hash collisions. NULL on miss.
	 */
	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;
	return NULL;
}
/* Get (or lazily create) the per-vport FDB table described by @attr.
 * On a cache hit the entry's num_rules refcount is bumped; on a miss a
 * new table is created in the FDB namespace and inserted into the hash.
 * Returns the flow table, or an ERR_PTR on failure. Pairs with
 * esw_vport_tbl_put().
 */
struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		/* Existing table: just take another reference. */
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;	/* propagate the ERR_PTR from create */

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;
err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}
void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
struct mlx5_vport_table *e;
struct mlx5_vport_key key;
u32 hkey;
mutex_lock(&esw->fdb_table.offloads.vports.lock);
hkey = flow_attr_to_vport_key(esw, attr, &key);
e = esw_vport_tbl_lookup(esw, &key, hkey);
if (!e || --e->num_rules)
goto out;
hash_del(&e->hlist);
mlx5_destroy_flow_table(e->fdb);
kfree(e);
out:
mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
......@@ -713,8 +713,16 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
/* Acquire/release a per-vport FDB table for every eswitch vport. */
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);

/* Identifies one per-vport FDB table by chain/prio/vport.
 * NOTE(review): chain is u16 here but u32 in the internal hash key
 * (struct mlx5_vport_key) — chain ids above 65535 would be truncated;
 * confirm the supported chain range fits in u16.
 */
struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
};

/* Refcounted get/put of the table matching @attr. */
struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
......
......@@ -54,185 +54,6 @@
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
/* Per vport tables */
/* Pre-move copies: this commit relocates these definitions to esw/vporttbl.c. */
#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

/* Identifies one per-vport FDB table by chain/prio/vport. */
struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
};

/* One refcounted per-vport FDB table, kept in the vports hash table. */
struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
/* Create one auto-grouped per-vport FDB table in the FDB_PER_VPORT prio.
 * Returns the table or an ERR_PTR (a warning is logged on failure).
 */
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}
	return fdb;
}
/* Fill the packed hash key from @attr plus the device vhca_id; return its jhash. */
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_vport_tbl_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}
/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	/* Bucket walk by precomputed jhash; full-key memcmp resolves collisions. */
	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;
	return NULL;
}
/* Drop one reference on the table matching @attr; destroy it on last put.
 * A lookup miss is a silent no-op.
 */
static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;
	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
/* Get (or lazily create) the per-vport table matching @attr; bumps the
 * refcount on a hit, creates and hashes a new table on a miss.
 * Returns the flow table or an ERR_PTR.
 */
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;
err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}
/* Acquire a per-vport table (chain 0, prio 1) for every eswitch vport.
 * On failure, best-effort unwind: drop the references taken so far
 * (puts for vports never acquired miss the lookup and are no-ops).
 */
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}
/* Release the per-vport table reference (chain 0, prio 1) of every vport. */
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}
/* End: Per vport tables */
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
......@@ -1514,6 +1335,42 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
}
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
/* Release the per-vport table reference (chain 0, prio 1) of every vport. */
static void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}
/* Acquire a per-vport table (chain 0, prio 1) for every eswitch vport.
 * On failure, best-effort unwind via mlx5_esw_vport_tbl_put() (puts for
 * vports never acquired miss the lookup and are no-ops).
 */
static int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}
#define fdb_modify_header_fwd_to_table_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment