Commit 5903325a authored by Eli Cohen, committed by David S. Miller

net/mlx5_core: Identify resources by their type

This patch adds a common part as the first field of struct mlx5_core_qp. The field is
used to identify which type of resource generated an event. It is required because
upcoming resource types, such as DC targets, are allocated from the same numerical
space as regular QPs and may generate the same events. By looking the resource up in a
single table, the handler can then inspect the common field to identify the resource.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b775516b
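
The change relies on the common "type header as first member" idiom: every resource that shares the QP number space embeds struct mlx5_core_rsc_common as its first field, so a lookup can return the common part and the handler can cast back to the concrete type after checking the type tag. The following standalone sketch illustrates the idiom in plain userspace C; the names (rsc_common, core_qp, rsc_event, RES_DCT) are simplified stand-ins chosen for the example, not the kernel code itself.

#include <stdio.h>

enum rsc_type { RES_QP, RES_DCT /* hypothetical future type (e.g. DC target) */ };

struct rsc_common {
	enum rsc_type res;	/* type tag checked before casting */
};

struct core_qp {
	struct rsc_common common;	/* must be first member */
	unsigned int qpn;
};

/* Dispatch on the common header, then cast to the concrete resource. */
static void rsc_event(struct rsc_common *common, int event_type)
{
	switch (common->res) {
	case RES_QP: {
		/* Valid because 'common' is the first member of core_qp. */
		struct core_qp *qp = (struct core_qp *)common;
		printf("QP 0x%x got event %d\n", qp->qpn, event_type);
		break;
	}
	default:
		printf("event %d for unknown resource type\n", event_type);
		break;
	}
}

int main(void)
{
	struct core_qp qp = { .common = { .res = RES_QP }, .qpn = 0x1a };

	rsc_event(&qp.common, 3);
	return 0;
}
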
@@ -198,7 +198,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 	int eqes_found = 0;
 	int set_ci = 0;
 	u32 cqn;
-	u32 srqn;
+	u32 rsn;
 	u8 port;
 
 	while ((eqe = next_eqe_sw(eq))) {
@@ -224,18 +224,18 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 			mlx5_core_dbg(dev, "event %s(%d) arrived\n",
 				      eqe_type_str(eqe->type), eqe->type);
-			mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff,
-				      eqe->type);
+			mlx5_rsc_event(dev, rsn, eqe->type);
 			break;
 
 		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
-			srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
-				      eqe_type_str(eqe->type), eqe->type, srqn);
-			mlx5_srq_event(dev, srqn, eqe->type);
+				      eqe_type_str(eqe->type), eqe->type, rsn);
+			mlx5_srq_event(dev, rsn, eqe->type);
 			break;
 
 		case MLX5_EVENT_TYPE_CMD:
@@ -39,28 +39,53 @@
 #include "mlx5_core.h"
 
-void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type)
+static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
+						 u32 rsn)
 {
 	struct mlx5_qp_table *table = &dev->priv.qp_table;
-	struct mlx5_core_qp *qp;
+	struct mlx5_core_rsc_common *common;
 
 	spin_lock(&table->lock);
-	qp = radix_tree_lookup(&table->tree, qpn);
-	if (qp)
-		atomic_inc(&qp->refcount);
+	common = radix_tree_lookup(&table->tree, rsn);
+	if (common)
+		atomic_inc(&common->refcount);
 	spin_unlock(&table->lock);
 
-	if (!qp) {
-		mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn);
-		return;
+	if (!common) {
+		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
+			       rsn);
+		return NULL;
 	}
 
-	qp->event(qp, event_type);
+	return common;
+}
+
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
+{
+	if (atomic_dec_and_test(&common->refcount))
+		complete(&common->free);
+}
+
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
+{
+	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+	struct mlx5_core_qp *qp;
+
+	if (!common)
+		return;
 
-	if (atomic_dec_and_test(&qp->refcount))
-		complete(&qp->free);
+	switch (common->res) {
+	case MLX5_RES_QP:
+		qp = (struct mlx5_core_qp *)common;
+		qp->event(qp, event_type);
+		break;
+
+	default:
+		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
+	}
+
+	mlx5_core_put_rsc(common);
 }
 
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
@@ -92,6 +117,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
 	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
 
+	qp->common.res = MLX5_RES_QP;
 	spin_lock_irq(&table->lock);
 	err = radix_tree_insert(&table->tree, qp->qpn, qp);
 	spin_unlock_irq(&table->lock);
@@ -106,9 +132,9 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			      qp->qpn);
 
 	qp->pid = current->pid;
-	atomic_set(&qp->refcount, 1);
+	atomic_set(&qp->common.refcount, 1);
 	atomic_inc(&dev->num_qps);
-	init_completion(&qp->free);
+	init_completion(&qp->common.free);
 
 	return 0;
@@ -138,9 +164,8 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
 	radix_tree_delete(&table->tree, qp->qpn);
 	spin_unlock_irqrestore(&table->lock, flags);
 
-	if (atomic_dec_and_test(&qp->refcount))
-		complete(&qp->free);
-	wait_for_completion(&qp->free);
+	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
+	wait_for_completion(&qp->common.free);
 
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
@@ -375,6 +375,16 @@ struct mlx5_core_mr {
 	u32	pd;
 };
 
+enum mlx5_res_type {
+	MLX5_RES_QP,
+};
+
+struct mlx5_core_rsc_common {
+	enum mlx5_res_type	res;
+	atomic_t		refcount;
+	struct completion	free;
+};
+
 struct mlx5_core_srq {
 	u32	srqn;
 	int	max;
@@ -700,7 +710,7 @@ int mlx5_eq_init(struct mlx5_core_dev *dev);
 void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
 void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
-void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type);
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -737,6 +747,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
 			 int npsvs, u32 *sig_index);
 int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
 
 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
@@ -342,10 +342,9 @@ struct mlx5_stride_block_ctrl_seg {
 };
 
 struct mlx5_core_qp {
+	struct mlx5_core_rsc_common	common; /* must be first */
 	void (*event)		(struct mlx5_core_qp *, int);
 	int			qpn;
-	atomic_t		refcount;
-	struct completion	free;
 	struct mlx5_rsc_debug	*dbg;
 	int			pid;
 };
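
The patch also moves the reference-count/completion lifetime rule into the common part: event delivery takes a reference via mlx5_get_rsc and drops it with mlx5_core_put_rsc, while mlx5_core_destroy_qp drops the initial reference and then waits on the completion until all outstanding users are gone. The sketch below is a userspace analogue of that rule, using C11 atomics and a POSIX semaphore in place of atomic_t and struct completion; the names rsc, rsc_put and event_handler are invented for the example and do not appear in the kernel.

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

struct rsc {
	atomic_int refcount;
	sem_t free;		/* plays the role of 'struct completion' */
};

static void rsc_put(struct rsc *r)
{
	/* Last reference gone: wake whoever is waiting to tear down. */
	if (atomic_fetch_sub(&r->refcount, 1) == 1)
		sem_post(&r->free);
}

static void *event_handler(void *arg)
{
	struct rsc *r = arg;

	/* ... deliver the event while holding a reference ... */
	rsc_put(r);
	return NULL;
}

int main(void)
{
	struct rsc r;
	pthread_t t;

	atomic_init(&r.refcount, 1);	/* initial reference, as in create */
	sem_init(&r.free, 0, 0);

	atomic_fetch_add(&r.refcount, 1);	/* handler takes its own ref */
	pthread_create(&t, NULL, event_handler, &r);

	rsc_put(&r);		/* destroy path drops the initial reference */
	sem_wait(&r.free);	/* ... and waits for the last put */
	pthread_join(t, NULL);
	printf("resource fully released\n");
	return 0;
}

Built with -pthread, main() returns only after the handler thread has dropped its reference, mirroring how the destroy path cannot free the QP while an event handler still holds it.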