Commit 19098df2 authored by majd@mellanox.com, committed by Doug Ledford

IB/mlx5: Refactor mlx5_ib_qp to accommodate other QP types

Extract the transport-QP-specific fields into a new mlx5_ib_qp_trans structure.
The mlx5_core QP object now resides in mlx5_ib_qp_base, from which all QP
types inherit. When we need to find the mlx5_ib_qp that corresponds to a
mlx5_core QP (event handling and the like), we follow the container_mibqp
pointer stored in mlx5_ib_qp_base.

In addition, we delete the redundant fields that weren't used anywhere
in the code:
- doorbell_qpn
- sq_max_wqes_per_wr
- sq_spare_wqes
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 146d2f1a
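
Editor's note: the commit message above describes a new layering, so before the diff itself here is a minimal, stand-alone C sketch (userspace, not part of the patch; struct contents are trimmed to the fields relevant to the lookup, everything else is omitted) of how embedding mlx5_core_qp in mlx5_ib_qp_base together with the container_mibqp back-pointer lets a handler that only holds the mlx5_core_qp recover the enclosing mlx5_ib_qp, mirroring the updated to_mibqp() helper in the diff below.

/*
 * Illustration only: how the container_mibqp back-pointer in
 * mlx5_ib_qp_base maps a mlx5_core_qp back to its mlx5_ib_qp.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mlx5_ib_qp;                          /* forward declaration */

struct mlx5_core_qp {                       /* mlx5_core object that delivers events */
        unsigned int qpn;
};

struct mlx5_ib_qp_base {                    /* shared by every QP type */
        struct mlx5_ib_qp *container_mibqp; /* back-pointer, set when the QP is created */
        struct mlx5_core_qp mqp;
};

struct mlx5_ib_qp_trans {                   /* transport-QP-specific fields */
        struct mlx5_ib_qp_base base;
        unsigned short xrcdn;
};

struct mlx5_ib_qp {                         /* the IB-layer QP */
        struct mlx5_ib_qp_trans trans_qp;
};

/* Same shape as the patched to_mibqp(): resolve the enclosing base from
 * the embedded mlx5_core_qp, then follow the back-pointer. */
static struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
        return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

int main(void)
{
        struct mlx5_ib_qp qp = { .trans_qp.base.mqp.qpn = 0x1234 };

        /* The driver would do the equivalent of this while creating the QP. */
        qp.trans_qp.base.container_mibqp = &qp;

        /* An event handler that only has the mlx5_core_qp can now recover
         * the IB QP and read, for example, its QP number. */
        struct mlx5_ib_qp *ibqp = to_mibqp(&qp.trans_qp.base.mqp);

        printf("lookup ok=%d, qpn=0x%x\n",
               ibqp == &qp, ibqp->trans_qp.base.mqp.qpn);
        return 0;
}
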
@@ -191,35 +191,44 @@ struct mlx5_ib_pfault {
         struct mlx5_pagefault   mpfault;
 };
 
+struct mlx5_ib_ubuffer {
+        struct ib_umem         *umem;
+        int                     buf_size;
+        u64                     buf_addr;
+};
+
+struct mlx5_ib_qp_base {
+        struct mlx5_ib_qp      *container_mibqp;
+        struct mlx5_core_qp     mqp;
+        struct mlx5_ib_ubuffer  ubuffer;
+};
+
+struct mlx5_ib_qp_trans {
+        struct mlx5_ib_qp_base  base;
+        u16                     xrcdn;
+        u8                      alt_port;
+        u8                      atomic_rd_en;
+        u8                      resp_depth;
+};
+
 struct mlx5_ib_qp {
         struct ib_qp            ibqp;
-        struct mlx5_core_qp     mqp;
+        struct mlx5_ib_qp_trans trans_qp;
         struct mlx5_buf         buf;
 
         struct mlx5_db          db;
         struct mlx5_ib_wq       rq;
 
-        u32                     doorbell_qpn;
         u8                      sq_signal_bits;
         u8                      fm_cache;
-        int                     sq_max_wqes_per_wr;
-        int                     sq_spare_wqes;
         struct mlx5_ib_wq       sq;
 
-        struct ib_umem         *umem;
-        int                     buf_size;
-
         /* serialize qp state modifications
          */
         struct mutex            mutex;
-        u16                     xrcdn;
         u32                     flags;
         u8                      port;
-        u8                      alt_port;
-        u8                      atomic_rd_en;
-        u8                      resp_depth;
         u8                      state;
-        int                     mlx_type;
         int                     wq_sig;
         int                     scat_cqe;
         int                     max_inline_data;
@@ -489,7 +498,7 @@ static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
 static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
 {
-        return container_of(mqp, struct mlx5_ib_qp, mqp);
+        return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
 }
 
 static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
@@ -567,7 +576,8 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                           struct ib_recv_wr **bad_wr);
 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
-                          void *buffer, u32 length);
+                          void *buffer, u32 length,
+                          struct mlx5_ib_qp_base *base);
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
                                 const struct ib_cq_init_attr *attr,
                                 struct ib_ucontext *context,
......
@@ -153,14 +153,16 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
 static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
                                       struct mlx5_ib_pfault *pfault,
-                                      int error) {
+                                      int error)
+{
         struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
-        int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
+        u32 qpn = qp->trans_qp.base.mqp.qpn;
+        int ret = mlx5_core_page_fault_resume(dev->mdev,
+                                              qpn,
                                               pfault->mpfault.flags,
                                               error);
         if (ret)
-                pr_err("Failed to resolve the page fault on QP 0x%x\n",
-                       qp->mqp.qpn);
+                pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn);
 }
 
 /*
@@ -391,6 +393,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 #if defined(DEBUG)
         u32 ctrl_wqe_index, ctrl_qpn;
 #endif
+        u32 qpn = qp->trans_qp.base.mqp.qpn;
 
         ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
         if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
@@ -401,7 +404,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
         if (ds == 0) {
                 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
-                            wqe_index, qp->mqp.qpn);
+                            wqe_index, qpn);
                 return -EFAULT;
         }
@@ -411,16 +414,16 @@ static int mlx5_ib_mr_initiator_pfault_handler(
                 MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
         if (wqe_index != ctrl_wqe_index) {
                 mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
-                            wqe_index, qp->mqp.qpn,
+                            wqe_index, qpn,
                             ctrl_wqe_index);
                 return -EFAULT;
         }
 
         ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
                 MLX5_WQE_CTRL_QPN_SHIFT;
-        if (qp->mqp.qpn != ctrl_qpn) {
+        if (qpn != ctrl_qpn) {
                 mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
-                            wqe_index, qp->mqp.qpn,
+                            wqe_index, qpn,
                             ctrl_qpn);
                 return -EFAULT;
         }
@@ -537,6 +540,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
         int resume_with_error = 0;
         u16 wqe_index = pfault->mpfault.wqe.wqe_index;
         int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
+        u32 qpn = qp->trans_qp.base.mqp.qpn;
 
         buffer = (char *)__get_free_page(GFP_KERNEL);
         if (!buffer) {
@@ -546,10 +550,10 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
         }
 
         ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
-                                    PAGE_SIZE);
+                                    PAGE_SIZE, &qp->trans_qp.base);
         if (ret < 0) {
                 mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
-                            -ret, wqe_index, qp->mqp.qpn);
+                            -ret, wqe_index, qpn);
                 resume_with_error = 1;
                 goto resolve_page_fault;
         }
@@ -586,7 +590,8 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
 resolve_page_fault:
         mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
         mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
-                    qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);
+                    qpn, resume_with_error,
+                    pfault->mpfault.flags);
 
         free_page((unsigned long)buffer);
 }
@@ -753,7 +758,7 @@ void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
         qp->disable_page_faults = 1;
         spin_lock_init(&qp->disable_page_faults_lock);
-        qp->mqp.pfault_handler = mlx5_ib_pfault_handler;
+        qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler;
 
         for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
                 INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
......