Commit 1339678f authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Manage definers with refcounts

In many cases different actions will ask for the same definer format.
Instead of allocating a new definer general object for each request and
running out of definers, keep an xarray of allocated definers and track
their usage with refcounts: allocate a new definer only when there isn't
one with the same format already created, and destroy a definer only
when its refcount drops to zero.
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent c72a57ad
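
A minimal caller sketch of the get/put pattern this commit introduces, using the mlx5dr_definer_get()/mlx5dr_definer_put() prototypes added to mlx5dr.h below. The wrapper function dr_action_sketch_use_definer() and its surrounding action logic are hypothetical and shown only to illustrate how the refcounted API is meant to be used:

/* Hypothetical caller sketch -- not part of this commit. */
#include "dr_types.h"

static int dr_action_sketch_use_definer(struct mlx5dr_domain *dmn,
					u16 format_id,
					u8 *dw_selectors,
					u8 *byte_selectors,
					u8 *match_mask)
{
	u32 definer_id;
	int ret;

	/* Reuses an existing definer with this exact format if one is
	 * already tracked in dmn->definers_xa, otherwise creates a new
	 * definer general object; either way a reference is taken.
	 */
	ret = mlx5dr_definer_get(dmn, format_id, dw_selectors,
				 byte_selectors, match_mask, &definer_id);
	if (ret)
		return ret;

	/* ... build the action/STE that references definer_id ... */

	/* On teardown (or error unwind), drop the reference; the definer
	 * object is destroyed only when the last user puts it.
	 */
	mlx5dr_definer_put(dmn, definer_id);
	return 0;
}

The point of the pattern is that repeated get calls with the same format share one definer general object, so callers do not exhaust the FW definer pool.
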
drivers/net/ethernet/mellanox/mlx5/core/Makefile

@@ -111,6 +111,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
 					steering/dr_ste_v2.o \
 					steering/dr_cmd.o steering/dr_fw.o \
 					steering/dr_action.o steering/fs_dr.o \
+					steering/dr_definer.o \
 					steering/dr_dbg.o lib/smfs.o
 #
 # SF device
...

drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c  (new file)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "dr_types.h"
#include "dr_ste.h"
struct dr_definer_object {
u32 id;
u16 format_id;
u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM];
u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM];
u8 match_mask[DR_STE_SIZE_MATCH_TAG];
refcount_t refcount;
};
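
/* Check whether an existing definer matches the requested format exactly */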
static bool dr_definer_compare(struct dr_definer_object *definer,
u16 format_id, u8 *dw_selectors,
u8 *byte_selectors, u8 *match_mask)
{
int i;
if (definer->format_id != format_id)
return false;
for (i = 0; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
if (definer->dw_selectors[i] != dw_selectors[i])
return false;
for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
if (definer->byte_selectors[i] != byte_selectors[i])
return false;
if (memcmp(definer->match_mask, match_mask, DR_STE_SIZE_MATCH_TAG))
return false;
return true;
}
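
/* Linearly scan the domain's definers xarray for a definer with the same format */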
static struct dr_definer_object *
dr_definer_find_obj(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
{
struct dr_definer_object *definer_obj;
unsigned long id;
xa_for_each(&dmn->definers_xa, id, definer_obj) {
if (dr_definer_compare(definer_obj, format_id,
dw_selectors, byte_selectors,
match_mask))
return definer_obj;
}
return NULL;
}
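
/* Create a definer general object in FW and insert it into the domain's
 * xarray, keyed by the definer ID, with an initial refcount of 1.
 */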
static struct dr_definer_object *
dr_definer_create_obj(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
{
struct dr_definer_object *definer_obj;
int ret = 0;
definer_obj = kzalloc(sizeof(*definer_obj), GFP_KERNEL);
if (!definer_obj)
return NULL;
ret = mlx5dr_cmd_create_definer(dmn->mdev,
format_id,
dw_selectors,
byte_selectors,
match_mask,
&definer_obj->id);
if (ret)
goto err_free_definer_obj;
/* Definer ID can have 32 bits, but STE format
* supports only definers with 8 bit IDs.
*/
if (definer_obj->id > 0xff) {
mlx5dr_err(dmn, "Unsupported definer ID (%d)\n", definer_obj->id);
goto err_destroy_definer;
}
definer_obj->format_id = format_id;
memcpy(definer_obj->dw_selectors, dw_selectors, sizeof(definer_obj->dw_selectors));
memcpy(definer_obj->byte_selectors, byte_selectors, sizeof(definer_obj->byte_selectors));
memcpy(definer_obj->match_mask, match_mask, sizeof(definer_obj->match_mask));
refcount_set(&definer_obj->refcount, 1);
ret = xa_insert(&dmn->definers_xa, definer_obj->id, definer_obj, GFP_KERNEL);
if (ret) {
mlx5dr_dbg(dmn, "Couldn't insert new definer into xarray (%d)\n", ret);
goto err_destroy_definer;
}
return definer_obj;
err_destroy_definer:
mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
err_free_definer_obj:
kfree(definer_obj);
return NULL;
}
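
/* Destroy the FW definer object and drop it from the domain's xarray */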
static void dr_definer_destroy_obj(struct mlx5dr_domain *dmn,
struct dr_definer_object *definer_obj)
{
mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
xa_erase(&dmn->definers_xa, definer_obj->id);
kfree(definer_obj);
}
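
/* Get a definer with the requested format: reuse an existing definer and
 * bump its refcount, or create a new one if no matching format is found.
 */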
int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors,
u8 *match_mask, u32 *definer_id)
{
struct dr_definer_object *definer_obj;
int ret = 0;
definer_obj = dr_definer_find_obj(dmn, format_id, dw_selectors,
byte_selectors, match_mask);
if (!definer_obj) {
definer_obj = dr_definer_create_obj(dmn, format_id,
dw_selectors, byte_selectors,
match_mask);
if (!definer_obj)
return -ENOMEM;
} else {
refcount_inc(&definer_obj->refcount);
}
*definer_id = definer_obj->id;
return ret;
}
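
/* Put a reference to a definer; the definer is destroyed when the last
 * reference is dropped.
 */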
void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id)
{
struct dr_definer_object *definer_obj;
definer_obj = xa_load(&dmn->definers_xa, definer_id);
if (!definer_obj) {
mlx5dr_err(dmn, "Definer ID %d not found\n", definer_id);
return;
}
if (refcount_dec_and_test(&definer_obj->refcount))
dr_definer_destroy_obj(dmn, definer_obj);
}
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c

@@ -425,10 +425,11 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 	refcount_set(&dmn->refcount, 1);
 	mutex_init(&dmn->info.rx.mutex);
 	mutex_init(&dmn->info.tx.mutex);
+	xa_init(&dmn->definers_xa);
 
 	if (dr_domain_caps_init(mdev, dmn)) {
 		mlx5dr_err(dmn, "Failed init domain, no caps\n");
-		goto free_domain;
+		goto def_xa_destroy;
 	}
 
 	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
@@ -453,7 +454,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 
 uninit_caps:
 	dr_domain_caps_uninit(dmn);
-free_domain:
+def_xa_destroy:
+	xa_destroy(&dmn->definers_xa);
 	kfree(dmn);
 	return NULL;
 }
@@ -493,6 +495,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 	dr_domain_uninit_csum_recalc_fts(dmn);
 	dr_domain_uninit_resources(dmn);
 	dr_domain_caps_uninit(dmn);
+	xa_destroy(&dmn->definers_xa);
 	mutex_destroy(&dmn->info.tx.mutex);
 	mutex_destroy(&dmn->info.rx.mutex);
 	kfree(dmn);
...
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h

@@ -925,6 +925,7 @@ struct mlx5dr_domain {
 	struct mlx5dr_ste_ctx *ste_ctx;
 	struct list_head dbg_tbl_list;
 	struct mlx5dr_dbg_dump_info dump_info;
+	struct xarray definers_xa;
 };
 
 struct mlx5dr_table_rx_tx {
...
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h

@@ -142,6 +142,11 @@ mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
 
 int mlx5dr_action_destroy(struct mlx5dr_action *action);
 
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+		       u8 *dw_selectors, u8 *byte_selectors,
+		       u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
 static inline bool
 mlx5dr_is_supported(struct mlx5_core_dev *dev)
 {
...