Commit bc4c2f2e authored by Mark Bloch's avatar Mark Bloch Committed by Saeed Mahameed

net/mlx5: Lag, filter non compatible devices

When searching for a peer lag device we can filter based on that
device's capabilities.

Downstream patch will be less strict when filtering compatible devices
and remove the limitation where we require exact MLX5_MAX_PORTS and
change it to a range.
Signed-off-by: default avatarMark Bloch <mbloch@nvidia.com>
Reviewed-by: default avatarMaor Gottlieb <maorg@nvidia.com>
Signed-off-by: default avatarSaeed Mahameed <saeedm@nvidia.com>
parent ec2fa47d
...@@ -555,12 +555,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev) ...@@ -555,12 +555,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
PCI_SLOT(dev->pdev->devfn)); PCI_SLOT(dev->pdev->devfn));
} }
static int next_phys_dev(struct device *dev, const void *data) static int _next_phys_dev(struct mlx5_core_dev *mdev,
const struct mlx5_core_dev *curr)
{ {
struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
struct mlx5_core_dev *mdev = madev->mdev;
const struct mlx5_core_dev *curr = data;
if (!mlx5_core_is_pf(mdev)) if (!mlx5_core_is_pf(mdev))
return 0; return 0;
...@@ -574,8 +571,29 @@ static int next_phys_dev(struct device *dev, const void *data) ...@@ -574,8 +571,29 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1; return 1;
} }
/* Must be called with intf_mutex held */ static int next_phys_dev(struct device *dev, const void *data)
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) {
struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
struct mlx5_core_dev *mdev = madev->mdev;
return _next_phys_dev(mdev, data);
}
static int next_phys_dev_lag(struct device *dev, const void *data)
{
struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
struct mlx5_core_dev *mdev = madev->mdev;
if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
!MLX5_CAP_GEN(mdev, lag_master) ||
MLX5_CAP_GEN(mdev, num_lag_ports) != MLX5_MAX_PORTS)
return 0;
return _next_phys_dev(mdev, data);
}
static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
int (*match)(struct device *dev, const void *data))
{ {
struct auxiliary_device *adev; struct auxiliary_device *adev;
struct mlx5_adev *madev; struct mlx5_adev *madev;
...@@ -583,7 +601,7 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) ...@@ -583,7 +601,7 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev)) if (!mlx5_core_is_pf(dev))
return NULL; return NULL;
adev = auxiliary_find_device(NULL, dev, &next_phys_dev); adev = auxiliary_find_device(NULL, dev, match);
if (!adev) if (!adev)
return NULL; return NULL;
...@@ -592,6 +610,20 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) ...@@ -592,6 +610,20 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
return madev->mdev; return madev->mdev;
} }
/* Return the next physical-function peer of @dev, or NULL if none.
 * Must be called with intf_mutex held.
 */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer;

	lockdep_assert_held(&mlx5_intf_mutex);
	peer = mlx5_get_next_dev(dev, &next_phys_dev);
	return peer;
}
/* Return the next lag-capable physical-function peer of @dev, or NULL
 * if none. Must be called with intf_mutex held.
 */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer;

	lockdep_assert_held(&mlx5_intf_mutex);
	peer = mlx5_get_next_dev(dev, &next_phys_dev_lag);
	return peer;
}
void mlx5_dev_list_lock(void) void mlx5_dev_list_lock(void)
{ {
mutex_lock(&mlx5_intf_mutex); mutex_lock(&mlx5_intf_mutex);
......
...@@ -913,12 +913,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) ...@@ -913,12 +913,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev = NULL; struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev; struct mlx5_core_dev *tmp_dev;
if (!MLX5_CAP_GEN(dev, vport_group_manager) || tmp_dev = mlx5_get_next_phys_dev_lag(dev);
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return 0;
tmp_dev = mlx5_get_next_phys_dev(dev);
if (tmp_dev) if (tmp_dev)
ldev = tmp_dev->priv.lag; ldev = tmp_dev->priv.lag;
...@@ -968,6 +963,11 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) ...@@ -968,6 +963,11 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{ {
int err; int err;
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
recheck: recheck:
mlx5_dev_list_lock(); mlx5_dev_list_lock();
err = __mlx5_lag_dev_add_mdev(dev); err = __mlx5_lag_dev_add_mdev(dev);
......
...@@ -210,6 +210,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev); ...@@ -210,6 +210,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev);
int mlx5_register_device(struct mlx5_core_dev *dev); int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void); void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void); void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void); int mlx5_dev_list_trylock(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment