Commit 21c2fe94 authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mthca: Combine special QP struct with mthca QP

As preparation for the removal of QP allocation logic, we need to ensure
that ib_core allocates the right amount of memory before a call to the
driver's create_qp(). This requires the driver to use the same struct for
all QP types.
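
To make the shape of the change concrete: before, the special-QP struct
embedded the common QP (struct mthca_sqp contained a struct mthca_qp), so
SMI/GSI QPs required a larger allocation than regular QPs; after, struct
mthca_qp carries a pointer to the separately allocated special-QP state, so
every QP type starts from the same top-level struct. A minimal standalone
sketch of the two layouts and the new allocation pattern (type and function
names here are illustrative, not the driver's; the real fields are in the
diff below):

	#include <stdlib.h>

	/*
	 * Before: special QPs embedded the common QP, so their
	 * allocation size differed from ordinary QPs and the core
	 * could not allocate uniformly.
	 */
	struct qp_common { int qpn; /* ... shared QP state ... */ };
	struct sqp_embedded { struct qp_common qp; void *header_buf; };

	/*
	 * After: every QP is the same top-level struct; special-QP
	 * state hangs off a pointer that stays NULL for ordinary QPs.
	 */
	struct sqp_state { void *header_buf; /* ... */ };
	struct qp { int qpn; struct sqp_state *sqp; };

	/* The two-step allocation the commit introduces, in miniature. */
	static struct qp *create_qp(int is_special)
	{
		struct qp *qp = calloc(1, sizeof(*qp));

		if (!qp)
			return NULL;
		if (is_special) {
			qp->sqp = calloc(1, sizeof(*qp->sqp));
			if (!qp->sqp) {
				free(qp);
				return NULL;
			}
		}
		return qp;
	}

With the second layout, freeing the side allocation must be paired with
freeing the QP everywhere, which is exactly what the error path in
mthca_create_qp() and the teardown in mthca_destroy_qp() below do.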

Link: https://lore.kernel.org/r/20200926102450.2966017-10-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b925c555
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -548,7 +548,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 		    struct ib_qp_cap *cap,
 		    int qpn,
 		    int port,
-		    struct mthca_sqp *sqp,
+		    struct mthca_qp *qp,
 		    struct ib_udata *udata);
 void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
 int mthca_create_ah(struct mthca_dev *dev,
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -535,9 +535,14 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
 	{
-		qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
+		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
+		qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
+		if (!qp->sqp) {
+			kfree(qp);
+			return ERR_PTR(-ENOMEM);
+		}

 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
@@ -546,7 +551,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 				      to_mcq(init_attr->recv_cq),
 				      init_attr->sq_sig_type, &init_attr->cap,
 				      qp->ibqp.qp_num, init_attr->port_num,
-				      to_msqp(qp), udata);
+				      qp, udata);
 		break;
 	}
 	default:
@@ -555,6 +560,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	}

 	if (err) {
+		kfree(qp->sqp);
 		kfree(qp);
 		return ERR_PTR(err);
 	}
@@ -587,7 +593,8 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 				    to_mqp(qp)->rq.db_index);
 	}
 	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
-	kfree(qp);
+	kfree(to_mqp(qp)->sqp);
+	kfree(to_mqp(qp));
 	return 0;
 }
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -240,6 +240,16 @@ struct mthca_wq {
 	__be32 *db;
 };

+struct mthca_sqp {
+	int pkey_index;
+	u32 qkey;
+	u32 send_psn;
+	struct ib_ud_header ud_header;
+	int header_buf_size;
+	void *header_buf;
+	dma_addr_t header_dma;
+};
+
 struct mthca_qp {
 	struct ib_qp ibqp;
 	int refcount;
@@ -265,17 +275,7 @@ struct mthca_qp {
 	wait_queue_head_t wait;
 	struct mutex mutex;
-};
-
-struct mthca_sqp {
-	struct mthca_qp qp;
-	int pkey_index;
-	u32 qkey;
-	u32 send_psn;
-	struct ib_ud_header ud_header;
-	int header_buf_size;
-	void *header_buf;
-	dma_addr_t header_dma;
+	struct mthca_sqp *sqp;
 };

 static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -313,9 +313,4 @@ static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
 	return container_of(ibqp, struct mthca_qp, ibqp);
 }

-static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
-{
-	return container_of(qp, struct mthca_sqp, qp);
-}
-
 #endif /* MTHCA_PROVIDER_H */
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -809,7 +809,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
 		qp->alt_port = attr->alt_port_num;

 	if (is_sqp(dev, qp))
-		store_attrs(to_msqp(qp), attr, attr_mask);
+		store_attrs(qp->sqp, attr, attr_mask);

 	/*
 	 * If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -1368,39 +1368,40 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 		    struct ib_qp_cap *cap,
 		    int qpn,
 		    int port,
-		    struct mthca_sqp *sqp,
+		    struct mthca_qp *qp,
 		    struct ib_udata *udata)
 {
 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
 	int err;

-	sqp->qp.transport = MLX;
-	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
+	qp->transport = MLX;
+	err = mthca_set_qp_size(dev, cap, pd, qp);
 	if (err)
 		return err;

-	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
-	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
-					     &sqp->header_dma, GFP_KERNEL);
-	if (!sqp->header_buf)
+	qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
+	qp->sqp->header_buf =
+		dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+				   &qp->sqp->header_dma, GFP_KERNEL);
+	if (!qp->sqp->header_buf)
 		return -ENOMEM;

 	spin_lock_irq(&dev->qp_table.lock);
 	if (mthca_array_get(&dev->qp_table.qp, mqpn))
 		err = -EBUSY;
 	else
-		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
+		mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
 	spin_unlock_irq(&dev->qp_table.lock);

 	if (err)
 		goto err_out;

-	sqp->qp.port = port;
-	sqp->qp.qpn = mqpn;
-	sqp->qp.transport = MLX;
+	qp->port = port;
+	qp->qpn = mqpn;
+	qp->transport = MLX;

 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-				    send_policy, &sqp->qp, udata);
+				    send_policy, qp, udata);
 	if (err)
 		goto err_out_free;
@@ -1421,10 +1422,9 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	mthca_unlock_cqs(send_cq, recv_cq);

 err_out:
-	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
-			  sqp->header_buf, sqp->header_dma);
+	dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+			  qp->sqp->header_buf, qp->sqp->header_dma);

 	return err;
 }
@@ -1487,20 +1487,19 @@ void mthca_free_qp(struct mthca_dev *dev,
 	if (is_sqp(dev, qp)) {
 		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
-		dma_free_coherent(&dev->pdev->dev,
-				  to_msqp(qp)->header_buf_size,
-				  to_msqp(qp)->header_buf,
-				  to_msqp(qp)->header_dma);
+		dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+				  qp->sqp->header_buf, qp->sqp->header_dma);
 	} else
 		mthca_free(&dev->qp_table.alloc, qp->qpn);
 }

 /* Create UD header for an MLX send and build a data segment for it */
-static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
-			    int ind, const struct ib_ud_wr *wr,
+static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
+			    const struct ib_ud_wr *wr,
 			    struct mthca_mlx_seg *mlx,
 			    struct mthca_data_seg *data)
 {
+	struct mthca_sqp *sqp = qp->sqp;
 	int header_size;
 	int err;
 	u16 pkey;
@@ -1513,7 +1512,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 	if (err)
 		return err;
 	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
-	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
+	mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
 				  (sqp->ud_header.lrh.destination_lid ==
 				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
 				  (sqp->ud_header.lrh.service_level << 8));
@@ -1534,29 +1533,29 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 		return -EINVAL;
 	}

-	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+	sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0;
 	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
 		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-	if (!sqp->qp.ibqp.qp_num)
-		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
-				   sqp->pkey_index, &pkey);
+	if (!qp->ibqp.qp_num)
+		ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
+				   &pkey);
 	else
-		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
-				   wr->pkey_index, &pkey);
+		ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
+				   &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
 					       sqp->qkey : wr->remote_qkey);
-	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);

 	header_size = ib_ud_header_pack(&sqp->ud_header,
 					sqp->header_buf +
 					ind * MTHCA_UD_HEADER_SIZE);

 	data->byte_count = cpu_to_be32(header_size);
-	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
+	data->lkey       = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
 	data->addr       = cpu_to_be64(sqp->header_dma +
 				       ind * MTHCA_UD_HEADER_SIZE);
@@ -1735,9 +1734,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			break;

 		case MLX:
-			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
-					       wqe - sizeof (struct mthca_next_seg),
-					       wqe);
+			err = build_mlx_header(
+				dev, qp, ind, ud_wr(wr),
+				wqe - sizeof(struct mthca_next_seg), wqe);
 			if (err) {
 				*bad_wr = wr;
 				goto out;
@@ -2065,9 +2064,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			break;

 		case MLX:
-			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
-					       wqe - sizeof (struct mthca_next_seg),
-					       wqe);
+			err = build_mlx_header(
+				dev, qp, ind, ud_wr(wr),
+				wqe - sizeof(struct mthca_next_seg), wqe);
 			if (err) {
 				*bad_wr = wr;
 				goto out;