Commit 1d797935 authored by Parav Pandit, committed by Saeed Mahameed

net/mlx5: SF, Rely on hw table for SF devlink port allocation

SF allocation support is currently checked in two places:
(a) the SF devlink port allocator and
(b) the SF HW table handler.

Both layers use the HCA capabilities to identify it, via the helper
routines mlx5_sf_supported() and mlx5_sf_max_functions().

Instead, rely solely on the HW table handler to check whether SF is
supported.
Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Vu Pham <vuhuong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 87bd418e
@@ -437,9 +437,6 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
 static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
 {
-	if (!mlx5_sf_max_functions(table->dev))
-		return;
-
 	init_completion(&table->disable_complete);
 	refcount_set(&table->refcount, 1);
 }
@@ -462,9 +459,6 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
 static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
 {
-	if (!mlx5_sf_max_functions(table->dev))
-		return;
-
 	if (!refcount_read(&table->refcount))
 		return;
@@ -498,7 +492,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
 static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
 {
-	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+	       mlx5_sf_hw_table_supported(dev);
 }

 int mlx5_sf_table_init(struct mlx5_core_dev *dev)
@@ -41,7 +41,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
 	int err;
 	int i;

-	if (!table->max_local_functions)
+	if (!table || !table->max_local_functions)
 		return -EOPNOTSUPP;

 	mutex_lock(&table->table_lock);
@@ -230,3 +230,8 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
 	/* Dealloc SFs whose firmware event has been missed. */
 	mlx5_sf_hw_dealloc_all(table);
 }
+
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
+{
+	return !!dev->priv.sf_hw_table;
+}
@@ -17,5 +17,6 @@ u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
 int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
 void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
 void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev);

 #endif
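As a standalone illustration of the pattern the diff applies, here is a minimal user-space sketch: only the HW-table layer inspects the capability and creates its table object, and the upper (devlink port) layer asks whether that table exists instead of re-reading the capability itself. All names below (sketch_dev, sketch_hw_table, hw_table_supported, ...) are hypothetical and the code is not the mlx5 implementation.

/*
 * Minimal sketch (not kernel code) of the pattern in this commit:
 * the capability is read in exactly one place, and everyone else
 * checks for the presence of the resulting HW table.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_hw_table {
	int max_local_functions;
};

struct sketch_dev {
	bool sf_cap;                      /* stands in for the HCA capability */
	struct sketch_hw_table *hw_table; /* NULL when SF is unsupported */
};

/* HW-table layer: the only place that looks at the capability. */
static void hw_table_init(struct sketch_dev *dev)
{
	if (!dev->sf_cap)
		return;
	dev->hw_table = calloc(1, sizeof(*dev->hw_table));
	if (dev->hw_table)
		dev->hw_table->max_local_functions = 8;
}

static bool hw_table_supported(const struct sketch_dev *dev)
{
	return dev->hw_table != NULL;
}

/* Upper layer: relies on the HW table instead of duplicating the check. */
static bool sf_table_supported(const struct sketch_dev *dev)
{
	return hw_table_supported(dev);
}

int main(void)
{
	struct sketch_dev dev = { .sf_cap = true, .hw_table = NULL };

	hw_table_init(&dev);
	printf("SF table supported: %s\n",
	       sf_table_supported(&dev) ? "yes" : "no");
	free(dev.hw_table);
	return 0;
}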