Commit ec2fa47d authored by Mark Bloch, committed by Saeed Mahameed

net/mlx5: Lag, use lag lock

Use a lag-specific lock instead of depending on external locks to
synchronize lag creation/destruction.

With this, taking the E-Switch mode lock is no longer needed to
synchronize the lag logic.

Clean up the leftover dead code and stop exporting functions that
aren't used outside the E-Switch core code.
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 4202ea95
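
For readers skimming the diff, the following is a minimal, illustrative sketch of the
locking pattern the patch moves to: a single lag-scoped mutex (ldev->lock) now guards
lag state such as mode_changes_in_progress, so the lag paths no longer take each
E-Switch's mode lock. The struct and field names mirror the patch; the helper
(sketch_lag_add_mdev) and its body are hypothetical, not the driver's actual code.

/* Minimal sketch of the pattern this patch adopts; not the actual driver code. */
#include <linux/mutex.h>
#include <linux/errno.h>

struct mlx5_lag_sketch {
	struct mutex lock;		/* protects lag fields/state changes */
	int mode_changes_in_progress;	/* non-zero while a mode change runs */
};

/* Hypothetical helper: attach a device to the lag unless a mode change is
 * in flight, in which case the caller backs off and retries (mirrors the
 * -EAGAIN handling in __mlx5_lag_dev_add_mdev()).
 */
static int sketch_lag_add_mdev(struct mlx5_lag_sketch *ldev)
{
	mutex_lock(&ldev->lock);
	if (ldev->mode_changes_in_progress) {
		mutex_unlock(&ldev->lock);
		return -EAGAIN;		/* caller sleeps and retries */
	}
	/* ... add the device to the lag while still holding the lock ... */
	mutex_unlock(&ldev->lock);
	return 0;
}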
@@ -1569,9 +1569,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	ida_init(&esw->offloads.vport_metadata_ida);
 	xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
 	mutex_init(&esw->state_lock);
-	lockdep_register_key(&esw->mode_lock_key);
 	init_rwsem(&esw->mode_lock);
-	lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
 	refcount_set(&esw->qos.refcnt, 0);
 	esw->enabled_vports = 0;
@@ -1615,7 +1613,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
 	WARN_ON(refcount_read(&esw->qos.refcnt));
-	lockdep_unregister_key(&esw->mode_lock_key);
 	mutex_destroy(&esw->state_lock);
 	WARN_ON(!xa_empty(&esw->offloads.vhca_map));
 	xa_destroy(&esw->offloads.vhca_map);
@@ -2003,17 +2000,6 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw)
 	up_write(&esw->mode_lock);
 }
 
-/**
- * mlx5_esw_lock() - Take write lock on esw mode lock
- * @esw: eswitch device.
- */
-void mlx5_esw_lock(struct mlx5_eswitch *esw)
-{
-	if (!mlx5_esw_allowed(esw))
-		return;
-	down_write(&esw->mode_lock);
-}
-
 /**
  * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
  *
@@ -331,7 +331,6 @@ struct mlx5_eswitch {
 		u32             large_group_num;
 	} params;
 	struct blocking_notifier_head n_head;
-	struct lock_class_key mode_lock_key;
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -704,7 +703,6 @@ void mlx5_esw_get(struct mlx5_core_dev *dev);
 void mlx5_esw_put(struct mlx5_core_dev *dev);
 int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
 void mlx5_esw_unlock(struct mlx5_eswitch *esw);
-void mlx5_esw_lock(struct mlx5_eswitch *esw);
 
 void esw_vport_change_handle_locked(struct mlx5_vport *vport);
@@ -730,9 +728,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 	return ERR_PTR(-EOPNOTSUPP);
 }
 
-static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
-static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }
-
 static inline struct mlx5_flow_handle *
 esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
 {
@@ -121,6 +121,7 @@ static void mlx5_ldev_free(struct kref *ref)
 	mlx5_lag_mp_cleanup(ldev);
 	cancel_delayed_work_sync(&ldev->bond_work);
 	destroy_workqueue(ldev->wq);
+	mutex_destroy(&ldev->lock);
 	kfree(ldev);
 }
@@ -150,6 +151,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
 	}
 
 	kref_init(&ldev->ref);
+	mutex_init(&ldev->lock);
 	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
 
 	ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -643,31 +645,11 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
 	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
 }
 
-static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
-				    struct mlx5_core_dev *dev1)
-{
-	if (dev0)
-		mlx5_esw_lock(dev0->priv.eswitch);
-	if (dev1)
-		mlx5_esw_lock(dev1->priv.eswitch);
-}
-
-static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
-				      struct mlx5_core_dev *dev1)
-{
-	if (dev1)
-		mlx5_esw_unlock(dev1->priv.eswitch);
-	if (dev0)
-		mlx5_esw_unlock(dev0->priv.eswitch);
-}
-
 static void mlx5_do_bond_work(struct work_struct *work)
 {
 	struct delayed_work *delayed_work = to_delayed_work(work);
 	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
 					     bond_work);
-	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	int status;
 
 	status = mlx5_dev_list_trylock();
@@ -676,15 +658,16 @@ static void mlx5_do_bond_work(struct work_struct *work)
 		return;
 	}
 
+	mutex_lock(&ldev->lock);
 	if (ldev->mode_changes_in_progress) {
+		mutex_unlock(&ldev->lock);
 		mlx5_dev_list_unlock();
 		mlx5_queue_bond_work(ldev, HZ);
 		return;
 	}
 
-	mlx5_lag_lock_eswitches(dev0, dev1);
 	mlx5_do_bond(ldev);
-	mlx5_lag_unlock_eswitches(dev0, dev1);
+	mutex_unlock(&ldev->lock);
 	mlx5_dev_list_unlock();
 }
@@ -908,7 +891,6 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
 	dev->priv.lag = ldev;
 }
 
-/* Must be called with intf_mutex held */
 static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
 				  struct mlx5_core_dev *dev)
 {
@@ -946,13 +928,18 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
 			mlx5_core_err(dev, "Failed to alloc lag dev\n");
 			return 0;
 		}
-	} else {
-		if (ldev->mode_changes_in_progress)
-			return -EAGAIN;
-		mlx5_ldev_get(ldev);
+		mlx5_ldev_add_mdev(ldev, dev);
+		return 0;
 	}
 
+	mutex_lock(&ldev->lock);
+	if (ldev->mode_changes_in_progress) {
+		mutex_unlock(&ldev->lock);
+		return -EAGAIN;
+	}
+	mlx5_ldev_get(ldev);
 	mlx5_ldev_add_mdev(ldev, dev);
+	mutex_unlock(&ldev->lock);
 	return 0;
 }
@@ -966,14 +953,14 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
 		return;
 
 recheck:
-	mlx5_dev_list_lock();
+	mutex_lock(&ldev->lock);
 	if (ldev->mode_changes_in_progress) {
-		mlx5_dev_list_unlock();
+		mutex_unlock(&ldev->lock);
 		msleep(100);
 		goto recheck;
 	}
 	mlx5_ldev_remove_mdev(ldev, dev);
-	mlx5_dev_list_unlock();
+	mutex_unlock(&ldev->lock);
 	mlx5_ldev_put(ldev);
 }
@@ -984,32 +971,35 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
 recheck:
 	mlx5_dev_list_lock();
 	err = __mlx5_lag_dev_add_mdev(dev);
+	mlx5_dev_list_unlock();
+
 	if (err) {
-		mlx5_dev_list_unlock();
 		msleep(100);
 		goto recheck;
 	}
-	mlx5_dev_list_unlock();
 }
 
-/* Must be called with intf_mutex held */
 void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
 			    struct net_device *netdev)
 {
 	struct mlx5_lag *ldev;
+	bool lag_is_active;
 
 	ldev = mlx5_lag_dev(dev);
 	if (!ldev)
 		return;
 
+	mutex_lock(&ldev->lock);
 	mlx5_ldev_remove_netdev(ldev, netdev);
 	ldev->flags &= ~MLX5_LAG_FLAG_READY;
 
-	if (__mlx5_lag_is_active(ldev))
+	lag_is_active = __mlx5_lag_is_active(ldev);
+	mutex_unlock(&ldev->lock);
+
+	if (lag_is_active)
 		mlx5_queue_bond_work(ldev, 0);
 }
 
-/* Must be called with intf_mutex held */
 void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 			 struct net_device *netdev)
 {
@@ -1020,6 +1010,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 	if (!ldev)
 		return;
 
+	mutex_lock(&ldev->lock);
 	mlx5_ldev_add_netdev(ldev, dev, netdev);
 
 	for (i = 0; i < MLX5_MAX_PORTS; i++)
@@ -1028,6 +1019,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 	if (i >= MLX5_MAX_PORTS)
 		ldev->flags |= MLX5_LAG_FLAG_READY;
+	mutex_unlock(&ldev->lock);
 	mlx5_queue_bond_work(ldev, 0);
 }
@@ -1104,8 +1096,6 @@ EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
 
 void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
 {
-	struct mlx5_core_dev *dev0;
-	struct mlx5_core_dev *dev1;
 	struct mlx5_lag *ldev;
 
 	ldev = mlx5_lag_dev(dev);
@@ -1113,16 +1103,13 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
 		return;
 
 	mlx5_dev_list_lock();
+	mutex_lock(&ldev->lock);
 
-	dev0 = ldev->pf[MLX5_LAG_P1].dev;
-	dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	ldev->mode_changes_in_progress++;
-	if (__mlx5_lag_is_active(ldev)) {
-		mlx5_lag_lock_eswitches(dev0, dev1);
+	if (__mlx5_lag_is_active(ldev))
 		mlx5_disable_lag(ldev);
-		mlx5_lag_unlock_eswitches(dev0, dev1);
-	}
+
+	mutex_unlock(&ldev->lock);
 	mlx5_dev_list_unlock();
 }
@@ -1134,9 +1121,9 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
 	if (!ldev)
 		return;
 
-	mlx5_dev_list_lock();
+	mutex_lock(&ldev->lock);
 	ldev->mode_changes_in_progress--;
-	mlx5_dev_list_unlock();
+	mutex_unlock(&ldev->lock);
 
 	mlx5_queue_bond_work(ldev, 0);
 }
@@ -56,6 +56,8 @@ struct mlx5_lag {
 	struct notifier_block     nb;
 	struct lag_mp             lag_mp;
 	struct mlx5_lag_port_sel  port_sel;
+	/* Protect lag fields/state changes */
+	struct mutex              lock;
 };
 
 static inline struct mlx5_lag *