Commit ca390799 authored by Yuval Avnery's avatar Yuval Avnery Committed by Saeed Mahameed

net/mlx5: Change interrupt handler to call chain notifier

Multiple EQs may share the same IRQ in subsequent patches.

Instead of calling the IRQ handler directly, the EQ will register
to an atomic chain notifier.

The Linux built-in shared IRQ is not used because it forces the caller
to disable the IRQ and clear affinity before free_irq() can be called.

This patch is the first step in the separation of IRQ and EQ logic.
Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 081cc2d7
...@@ -920,6 +920,7 @@ struct mlx5_ib_lb_state { ...@@ -920,6 +920,7 @@ struct mlx5_ib_lb_state {
}; };
struct mlx5_ib_pf_eq { struct mlx5_ib_pf_eq {
struct notifier_block irq_nb;
struct mlx5_ib_dev *dev; struct mlx5_ib_dev *dev;
struct mlx5_eq *core; struct mlx5_eq *core;
struct work_struct work; struct work_struct work;
......
...@@ -1488,9 +1488,11 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) ...@@ -1488,9 +1488,11 @@ static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
mlx5_eq_update_ci(eq->core, cc, 1); mlx5_eq_update_ci(eq->core, cc, 1);
} }
static irqreturn_t mlx5_ib_eq_pf_int(int irq, void *eq_ptr) static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
void *data)
{ {
struct mlx5_ib_pf_eq *eq = eq_ptr; struct mlx5_ib_pf_eq *eq =
container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
unsigned long flags; unsigned long flags;
if (spin_trylock_irqsave(&eq->lock, flags)) { if (spin_trylock_irqsave(&eq->lock, flags)) {
...@@ -1553,12 +1555,12 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) ...@@ -1553,12 +1555,12 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
goto err_mempool; goto err_mempool;
} }
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) { param = (struct mlx5_eq_param) {
.index = MLX5_EQ_PFAULT_IDX, .index = MLX5_EQ_PFAULT_IDX,
.mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT, .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
.nent = MLX5_IB_NUM_PF_EQE, .nent = MLX5_IB_NUM_PF_EQE,
.context = eq, .nb = &eq->irq_nb,
.handler = mlx5_ib_eq_pf_int
}; };
eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", &param); eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", &param);
if (IS_ERR(eq->core)) { if (IS_ERR(eq->core)) {
......
...@@ -34,10 +34,17 @@ struct mlx5_eq { ...@@ -34,10 +34,17 @@ struct mlx5_eq {
u8 eqn; u8 eqn;
int nent; int nent;
struct mlx5_rsc_debug *dbg; struct mlx5_rsc_debug *dbg;
struct notifier_block *irq_nb; /* For destroy only */
};
struct mlx5_eq_async {
struct mlx5_eq core;
struct notifier_block irq_nb;
}; };
struct mlx5_eq_comp { struct mlx5_eq_comp {
struct mlx5_eq core; /* Must be first */ struct mlx5_eq core;
struct notifier_block irq_nb;
struct mlx5_eq_tasklet tasklet_ctx; struct mlx5_eq_tasklet tasklet_ctx;
struct list_head list; struct list_head list;
}; };
......
...@@ -26,8 +26,7 @@ struct mlx5_eq_param { ...@@ -26,8 +26,7 @@ struct mlx5_eq_param {
u8 index; u8 index;
int nent; int nent;
u64 mask; u64 mask;
void *context; struct notifier_block *nb;
irq_handler_t handler;
}; };
struct mlx5_eq * struct mlx5_eq *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment