Commit 5ec8c83e authored by Aviv Heller, committed by Doug Ledford

IB/mlx5: Port events in RoCE now rely on netdev events

Since ib_query_port() in RoCE returns the state of its netdev as the port
state, it makes sense to propagate the port up/down events to ib_core
when the netdev port state changes, instead of relying on traditional
core events.

This also keeps both the event and ib_query_port() synchronized.
Signed-off-by: Aviv Heller <avivh@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 350d0e4c
...@@ -107,13 +107,32 @@ static int mlx5_netdev_event(struct notifier_block *this, ...@@ -107,13 +107,32 @@ static int mlx5_netdev_event(struct notifier_block *this,
struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev, struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
roce.nb); roce.nb);
if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER)) switch (event) {
return NOTIFY_DONE; case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
write_lock(&ibdev->roce.netdev_lock);
if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
NULL : ndev;
write_unlock(&ibdev->roce.netdev_lock);
break;
write_lock(&ibdev->roce.netdev_lock); case NETDEV_UP:
if (ndev->dev.parent == &ibdev->mdev->pdev->dev) case NETDEV_DOWN:
ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev; if (ndev == ibdev->roce.netdev && ibdev->ib_active) {
write_unlock(&ibdev->roce.netdev_lock); struct ib_event ibev = {0};
ibev.device = &ibdev->ib_dev;
ibev.event = (event == NETDEV_UP) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
ibev.element.port_num = 1;
ib_dispatch_event(&ibev);
}
break;
default:
break;
}
return NOTIFY_DONE; return NOTIFY_DONE;
} }
...@@ -2267,14 +2286,19 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, ...@@ -2267,14 +2286,19 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
break; break;
case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_UP:
ibev.event = IB_EVENT_PORT_ACTIVE;
port = (u8)param;
break;
case MLX5_DEV_EVENT_PORT_DOWN: case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED: case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param; port = (u8)param;
/* In RoCE, port up/down events are handled in
* mlx5_netdev_event().
*/
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET)
return;
ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break; break;
case MLX5_DEV_EVENT_LID_CHANGE: case MLX5_DEV_EVENT_LID_CHANGE:
...@@ -2679,14 +2703,24 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str, ...@@ -2679,14 +2703,24 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
} }
/*
 * Unregister the RoCE netdev notifier, if it is currently registered.
 *
 * notifier_call doubles as the "registered" flag: it is cleared after
 * unregistering, which makes this helper idempotent and therefore safe
 * to call from both error-unwind paths and normal teardown (it is used
 * from the err_disable_roce path and from mlx5_ib_remove()).
 */
static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
{
if (dev->roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->roce.nb);
dev->roce.nb.notifier_call = NULL;
}
}
static int mlx5_enable_roce(struct mlx5_ib_dev *dev) static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{ {
int err; int err;
dev->roce.nb.notifier_call = mlx5_netdev_event; dev->roce.nb.notifier_call = mlx5_netdev_event;
err = register_netdevice_notifier(&dev->roce.nb); err = register_netdevice_notifier(&dev->roce.nb);
if (err) if (err) {
dev->roce.nb.notifier_call = NULL;
return err; return err;
}
err = mlx5_nic_vport_enable_roce(dev->mdev); err = mlx5_nic_vport_enable_roce(dev->mdev);
if (err) if (err)
...@@ -2695,14 +2729,13 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev) ...@@ -2695,14 +2729,13 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
return 0; return 0;
err_unregister_netdevice_notifier: err_unregister_netdevice_notifier:
unregister_netdevice_notifier(&dev->roce.nb); mlx5_remove_roce_notifier(dev);
return err; return err;
} }
static void mlx5_disable_roce(struct mlx5_ib_dev *dev) static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
{ {
mlx5_nic_vport_disable_roce(dev->mdev); mlx5_nic_vport_disable_roce(dev->mdev);
unregister_netdevice_notifier(&dev->roce.nb);
} }
static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev) static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
...@@ -3051,8 +3084,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) ...@@ -3051,8 +3084,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
destroy_dev_resources(&dev->devr); destroy_dev_resources(&dev->devr);
err_disable_roce: err_disable_roce:
if (ll == IB_LINK_LAYER_ETHERNET) if (ll == IB_LINK_LAYER_ETHERNET) {
mlx5_disable_roce(dev); mlx5_disable_roce(dev);
mlx5_remove_roce_notifier(dev);
}
err_free_port: err_free_port:
kfree(dev->port); kfree(dev->port);
...@@ -3068,6 +3103,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) ...@@ -3068,6 +3103,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context; struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1); enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
mlx5_remove_roce_notifier(dev);
ib_unregister_device(&dev->ib_dev); ib_unregister_device(&dev->ib_dev);
mlx5_ib_dealloc_q_counters(dev); mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev); destroy_umrc_res(dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment