Commit ec34a922 authored by Roland Dreier's avatar Roland Dreier Committed by Roland Dreier

[PATCH] IB/mthca: Add SRQ implementation

Add mthca support for shared receive queues (SRQs),
including userspace SRQs.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent d20a4019
...@@ -9,4 +9,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o ...@@ -9,4 +9,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \ ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \ mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \ mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
mthca_provider.o mthca_memfree.o mthca_uar.o mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
...@@ -109,6 +109,7 @@ enum { ...@@ -109,6 +109,7 @@ enum {
CMD_SW2HW_SRQ = 0x35, CMD_SW2HW_SRQ = 0x35,
CMD_HW2SW_SRQ = 0x36, CMD_HW2SW_SRQ = 0x36,
CMD_QUERY_SRQ = 0x37, CMD_QUERY_SRQ = 0x37,
CMD_ARM_SRQ = 0x40,
/* QP/EE commands */ /* QP/EE commands */
CMD_RST2INIT_QPEE = 0x19, CMD_RST2INIT_QPEE = 0x19,
...@@ -1032,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, ...@@ -1032,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
...@@ -1500,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, ...@@ -1500,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
CMD_TIME_CLASS_A, status); CMD_TIME_CLASS_A, status);
} }
/*
 * SW2HW_SRQ firmware command: pass ownership of the SRQ context held in
 * @mailbox to the HCA for SRQ number @srq_num.  The firmware completion
 * status is returned through @status.
 */
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num, u8 *status)
{
	return mthca_cmd(dev, mailbox->dma, srq_num, 0,
			 CMD_SW2HW_SRQ, CMD_TIME_CLASS_A, status);
}
/*
 * HW2SW_SRQ firmware command: take ownership of SRQ @srq_num back from
 * the HCA; the hardware writes the context out into @mailbox.  The
 * firmware completion status is returned through @status.
 */
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
		    int srq_num, u8 *status)
{
	return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
			     CMD_HW2SW_SRQ, CMD_TIME_CLASS_A, status);
}
/*
 * ARM_SRQ firmware command: arm SRQ @srq_num, passing @limit as the
 * command's input parameter (presumably the SRQ limit-event threshold
 * -- confirm against the HCA programming manual).  Firmware status is
 * returned through @status.
 */
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
{
	return mthca_cmd(dev, limit, srq_num, 0,
			 CMD_ARM_SRQ, CMD_TIME_CLASS_B, status);
}
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask, int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status) u8 *status)
......
...@@ -298,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, ...@@ -298,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status); int cq_num, u8 *status);
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status); int cq_num, u8 *status);
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int srq_num, u8 *status);
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int srq_num, u8 *status);
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask, int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status); u8 *status);
......
...@@ -224,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn) ...@@ -224,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
} }
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
struct mthca_srq *srq)
{ {
struct mthca_cq *cq; struct mthca_cq *cq;
struct mthca_cqe *cqe; struct mthca_cqe *cqe;
...@@ -265,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) ...@@ -265,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
*/ */
while (prod_index > cq->cons_index) { while (prod_index > cq->cons_index) {
cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
if (cqe->my_qpn == cpu_to_be32(qpn)) if (cqe->my_qpn == cpu_to_be32(qpn)) {
if (srq)
mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
++nfreed; ++nfreed;
}
else if (nfreed) else if (nfreed)
memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
cq->ibcq.cqe), cq->ibcq.cqe),
...@@ -455,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev, ...@@ -455,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
>> wq->wqe_shift); >> wq->wqe_shift);
entry->wr_id = (*cur_qp)->wrid[wqe_index + entry->wr_id = (*cur_qp)->wrid[wqe_index +
(*cur_qp)->rq.max]; (*cur_qp)->rq.max];
} else if ((*cur_qp)->ibqp.srq) {
struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
u32 wqe = be32_to_cpu(cqe->wqe);
wq = NULL;
wqe_index = wqe >> srq->wqe_shift;
entry->wr_id = srq->wrid[wqe_index];
mthca_free_srq_wqe(srq, wqe);
} else { } else {
wq = &(*cur_qp)->rq; wq = &(*cur_qp)->rq;
wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
entry->wr_id = (*cur_qp)->wrid[wqe_index]; entry->wr_id = (*cur_qp)->wrid[wqe_index];
} }
if (wq) {
if (wq->last_comp < wqe_index) if (wq->last_comp < wqe_index)
wq->tail += wqe_index - wq->last_comp; wq->tail += wqe_index - wq->last_comp;
else else
wq->tail += wqe_index + wq->max - wq->last_comp; wq->tail += wqe_index + wq->max - wq->last_comp;
wq->last_comp = wqe_index; wq->last_comp = wqe_index;
}
if (0)
mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
is_send ? "Send" : "Receive",
(*cur_qp)->qpn, wqe_index, wq->max);
if (is_error) { if (is_error) {
err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
......
...@@ -218,6 +218,13 @@ struct mthca_cq_table { ...@@ -218,6 +218,13 @@ struct mthca_cq_table {
struct mthca_icm_table *table; struct mthca_icm_table *table;
}; };
/* Per-device state for the shared receive queue (SRQ) resource table. */
struct mthca_srq_table {
	struct mthca_alloc	alloc;	/* presumably allocates SRQ numbers -- mirrors the other *_table structs; confirm */
	spinlock_t		lock;	/* NOTE(review): presumably protects the srq array below -- confirm */
	struct mthca_array	srq;	/* presumably srqn -> struct mthca_srq lookup -- confirm against event dispatch */
	struct mthca_icm_table *table;	/* ICM backing for SRQ contexts; allocated from init_hca->srqc_base (mem-free HCAs only) */
};
struct mthca_qp_table { struct mthca_qp_table {
struct mthca_alloc alloc; struct mthca_alloc alloc;
u32 rdb_base; u32 rdb_base;
...@@ -299,6 +306,7 @@ struct mthca_dev { ...@@ -299,6 +306,7 @@ struct mthca_dev {
struct mthca_mr_table mr_table; struct mthca_mr_table mr_table;
struct mthca_eq_table eq_table; struct mthca_eq_table eq_table;
struct mthca_cq_table cq_table; struct mthca_cq_table cq_table;
struct mthca_srq_table srq_table;
struct mthca_qp_table qp_table; struct mthca_qp_table qp_table;
struct mthca_av_table av_table; struct mthca_av_table av_table;
struct mthca_mcg_table mcg_table; struct mthca_mcg_table mcg_table;
...@@ -372,6 +380,7 @@ int mthca_init_pd_table(struct mthca_dev *dev); ...@@ -372,6 +380,7 @@ int mthca_init_pd_table(struct mthca_dev *dev);
int mthca_init_mr_table(struct mthca_dev *dev); int mthca_init_mr_table(struct mthca_dev *dev);
int mthca_init_eq_table(struct mthca_dev *dev); int mthca_init_eq_table(struct mthca_dev *dev);
int mthca_init_cq_table(struct mthca_dev *dev); int mthca_init_cq_table(struct mthca_dev *dev);
int mthca_init_srq_table(struct mthca_dev *dev);
int mthca_init_qp_table(struct mthca_dev *dev); int mthca_init_qp_table(struct mthca_dev *dev);
int mthca_init_av_table(struct mthca_dev *dev); int mthca_init_av_table(struct mthca_dev *dev);
int mthca_init_mcg_table(struct mthca_dev *dev); int mthca_init_mcg_table(struct mthca_dev *dev);
...@@ -381,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev); ...@@ -381,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev);
void mthca_cleanup_mr_table(struct mthca_dev *dev); void mthca_cleanup_mr_table(struct mthca_dev *dev);
void mthca_cleanup_eq_table(struct mthca_dev *dev); void mthca_cleanup_eq_table(struct mthca_dev *dev);
void mthca_cleanup_cq_table(struct mthca_dev *dev); void mthca_cleanup_cq_table(struct mthca_dev *dev);
void mthca_cleanup_srq_table(struct mthca_dev *dev);
void mthca_cleanup_qp_table(struct mthca_dev *dev); void mthca_cleanup_qp_table(struct mthca_dev *dev);
void mthca_cleanup_av_table(struct mthca_dev *dev); void mthca_cleanup_av_table(struct mthca_dev *dev);
void mthca_cleanup_mcg_table(struct mthca_dev *dev); void mthca_cleanup_mcg_table(struct mthca_dev *dev);
...@@ -431,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, ...@@ -431,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
void mthca_free_cq(struct mthca_dev *dev, void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq); struct mthca_cq *cq);
void mthca_cq_event(struct mthca_dev *dev, u32 cqn); void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn); void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
struct mthca_srq *srq);
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
struct ib_srq_attr *attr, struct mthca_srq *srq);
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn, void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type); enum ib_event_type event_type);
......
...@@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev) ...@@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
profile = default_profile; profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.uarc_size = 0; profile.uarc_size = 0;
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
profile.num_srq = dev_lim.max_srqs;
err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if (err < 0) if (err < 0)
...@@ -433,6 +435,20 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev, ...@@ -433,6 +435,20 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
goto err_unmap_rdb; goto err_unmap_rdb;
} }
if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
mdev->srq_table.table =
mthca_alloc_icm_table(mdev, init_hca->srqc_base,
dev_lim->srq_entry_sz,
mdev->limits.num_srqs,
mdev->limits.reserved_srqs, 0);
if (!mdev->srq_table.table) {
mthca_err(mdev, "Failed to map SRQ context memory, "
"aborting.\n");
err = -ENOMEM;
goto err_unmap_cq;
}
}
/* /*
* It's not strictly required, but for simplicity just map the * It's not strictly required, but for simplicity just map the
* whole multicast group table now. The table isn't very big * whole multicast group table now. The table isn't very big
...@@ -448,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev, ...@@ -448,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
if (!mdev->mcg_table.table) { if (!mdev->mcg_table.table) {
mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
err = -ENOMEM; err = -ENOMEM;
goto err_unmap_cq; goto err_unmap_srq;
} }
return 0; return 0;
err_unmap_srq:
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
mthca_free_icm_table(mdev, mdev->srq_table.table);
err_unmap_cq: err_unmap_cq:
mthca_free_icm_table(mdev, mdev->cq_table.table); mthca_free_icm_table(mdev, mdev->cq_table.table);
...@@ -532,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) ...@@ -532,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
profile = default_profile; profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE; profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.num_udav = 0; profile.num_udav = 0;
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
profile.num_srq = dev_lim.max_srqs;
icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if ((int) icm_size < 0) { if ((int) icm_size < 0) {
...@@ -558,6 +580,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) ...@@ -558,6 +580,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
return 0; return 0;
err_free_icm: err_free_icm:
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
mthca_free_icm_table(mdev, mdev->srq_table.table);
mthca_free_icm_table(mdev, mdev->cq_table.table); mthca_free_icm_table(mdev, mdev->cq_table.table);
mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
...@@ -587,6 +611,8 @@ static void mthca_close_hca(struct mthca_dev *mdev) ...@@ -587,6 +611,8 @@ static void mthca_close_hca(struct mthca_dev *mdev)
mthca_CLOSE_HCA(mdev, 0, &status); mthca_CLOSE_HCA(mdev, 0, &status);
if (mthca_is_memfree(mdev)) { if (mthca_is_memfree(mdev)) {
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
mthca_free_icm_table(mdev, mdev->srq_table.table);
mthca_free_icm_table(mdev, mdev->cq_table.table); mthca_free_icm_table(mdev, mdev->cq_table.table);
mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
...@@ -731,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev) ...@@ -731,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
goto err_cmd_poll; goto err_cmd_poll;
} }
err = mthca_init_srq_table(dev);
if (err) {
mthca_err(dev, "Failed to initialize "
"shared receive queue table, aborting.\n");
goto err_cq_table_free;
}
err = mthca_init_qp_table(dev); err = mthca_init_qp_table(dev);
if (err) { if (err) {
mthca_err(dev, "Failed to initialize " mthca_err(dev, "Failed to initialize "
"queue pair table, aborting.\n"); "queue pair table, aborting.\n");
goto err_cq_table_free; goto err_srq_table_free;
} }
err = mthca_init_av_table(dev); err = mthca_init_av_table(dev);
...@@ -760,6 +793,9 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev) ...@@ -760,6 +793,9 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
err_qp_table_free: err_qp_table_free:
mthca_cleanup_qp_table(dev); mthca_cleanup_qp_table(dev);
err_srq_table_free:
mthca_cleanup_srq_table(dev);
err_cq_table_free: err_cq_table_free:
mthca_cleanup_cq_table(dev); mthca_cleanup_cq_table(dev);
...@@ -1046,6 +1082,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev, ...@@ -1046,6 +1082,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
mthca_cleanup_mcg_table(mdev); mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev); mthca_cleanup_av_table(mdev);
mthca_cleanup_qp_table(mdev); mthca_cleanup_qp_table(mdev);
mthca_cleanup_srq_table(mdev);
mthca_cleanup_cq_table(mdev); mthca_cleanup_cq_table(mdev);
mthca_cmd_use_polling(mdev); mthca_cmd_use_polling(mdev);
mthca_cleanup_eq_table(mdev); mthca_cleanup_eq_table(mdev);
...@@ -1095,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev) ...@@ -1095,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
mthca_cleanup_mcg_table(mdev); mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev); mthca_cleanup_av_table(mdev);
mthca_cleanup_qp_table(mdev); mthca_cleanup_qp_table(mdev);
mthca_cleanup_srq_table(mdev);
mthca_cleanup_cq_table(mdev); mthca_cleanup_cq_table(mdev);
mthca_cmd_use_polling(mdev); mthca_cmd_use_polling(mdev);
mthca_cleanup_eq_table(mdev); mthca_cleanup_eq_table(mdev);
......
...@@ -102,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev, ...@@ -102,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
profile[MTHCA_RES_UARC].size = request->uarc_size; profile[MTHCA_RES_UARC].size = request->uarc_size;
profile[MTHCA_RES_QP].num = request->num_qp; profile[MTHCA_RES_QP].num = request->num_qp;
profile[MTHCA_RES_SRQ].num = request->num_srq;
profile[MTHCA_RES_EQP].num = request->num_qp; profile[MTHCA_RES_EQP].num = request->num_qp;
profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp;
profile[MTHCA_RES_CQ].num = request->num_cq; profile[MTHCA_RES_CQ].num = request->num_cq;
......
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
struct mthca_profile { struct mthca_profile {
int num_qp; int num_qp;
int rdb_per_qp; int rdb_per_qp;
int num_srq;
int num_cq; int num_cq;
int num_mcg; int num_mcg;
int num_mpt; int num_mpt;
......
...@@ -425,6 +425,77 @@ static int mthca_ah_destroy(struct ib_ah *ah) ...@@ -425,6 +425,77 @@ static int mthca_ah_destroy(struct ib_ah *ah)
return 0; return 0;
} }
/*
 * Create a shared receive queue (SRQ).
 *
 * For userspace SRQs (pd->uobject set) the caller supplies a
 * struct mthca_create_srq in @udata: an lkey covering the queue buffer
 * and a doorbell record (index + page) which is mapped here.  On
 * success the new SRQ number is copied back to userspace.
 *
 * Returns a pointer to the embedded ib_srq on success, ERR_PTR() on
 * failure.
 */
static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
				       struct ib_srq_init_attr *init_attr,
				       struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = NULL;
	struct mthca_srq *srq;
	int err;

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (pd->uobject) {
		context = to_mucontext(pd->uobject->context);

		/*
		 * Fix: the original returned ERR_PTR(-EFAULT) directly
		 * here, leaking the srq allocated just above.
		 */
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_free;
		}

		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);
		if (err)
			goto err_free;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index     = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
			      &init_attr->attr, srq);

	/* Undo the user doorbell mapping if allocation failed. */
	if (err && pd->uobject)
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
		/*
		 * Fix: also tear down the user doorbell mapping on this
		 * path; the original freed the SRQ but left the doorbell
		 * page mapped (it is normally unmapped in
		 * mthca_destroy_srq, which never runs for this srq).
		 */
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
				    context->db_tab, ucmd.db_index);
		mthca_free_srq(to_mdev(pd->device), srq);
		err = -EFAULT;
		goto err_free;
	}

	return &srq->ibsrq;

err_free:
	kfree(srq);

	return ERR_PTR(err);
}
static int mthca_destroy_srq(struct ib_srq *srq)
{
struct mthca_ucontext *context;
if (srq->uobject) {
context = to_mucontext(srq->uobject->context);
mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
context->db_tab, to_msrq(srq)->db_index);
}
mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
kfree(srq);
return 0;
}
static struct ib_qp *mthca_create_qp(struct ib_pd *pd, static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
...@@ -1003,6 +1074,17 @@ int mthca_register_device(struct mthca_dev *dev) ...@@ -1003,6 +1074,17 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.dealloc_pd = mthca_dealloc_pd; dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
dev->ib_dev.create_ah = mthca_ah_create; dev->ib_dev.create_ah = mthca_ah_create;
dev->ib_dev.destroy_ah = mthca_ah_destroy; dev->ib_dev.destroy_ah = mthca_ah_destroy;
if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
dev->ib_dev.create_srq = mthca_create_srq;
dev->ib_dev.destroy_srq = mthca_destroy_srq;
if (mthca_is_memfree(dev))
dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
else
dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
}
dev->ib_dev.create_qp = mthca_create_qp; dev->ib_dev.create_qp = mthca_create_qp;
dev->ib_dev.modify_qp = mthca_modify_qp; dev->ib_dev.modify_qp = mthca_modify_qp;
dev->ib_dev.destroy_qp = mthca_destroy_qp; dev->ib_dev.destroy_qp = mthca_destroy_qp;
......
...@@ -197,6 +197,29 @@ struct mthca_cq { ...@@ -197,6 +197,29 @@ struct mthca_cq {
wait_queue_head_t wait; wait_queue_head_t wait;
}; };
/*
 * Driver state for one shared receive queue.  Embeds the core ib_srq so
 * that container_of() can recover the mthca_srq (see to_msrq()).
 */
struct mthca_srq {
	struct ib_srq		ibsrq;
	spinlock_t		lock;		/* NOTE(review): presumably serializes WQE post/free -- confirm in mthca_srq.c */
	atomic_t		refcount;
	int			srqn;		/* SRQ number; copied to userspace by mthca_create_srq() */
	int			max;		/* presumably max outstanding WQEs -- confirm */
	int			max_gs;		/* presumably max scatter entries per WQE -- confirm */
	int			wqe_shift;	/* log2 of WQE stride; cqe->wqe >> wqe_shift gives the WQE index */
	int			first_free;	/* free-WQE list head -- TODO confirm semantics */
	int			last_free;	/* free-WQE list tail -- TODO confirm semantics */
	u16			counter;	/* Arbel only */
	int			db_index;	/* Arbel only */
	__be32		       *db;		/* Arbel only */
	void		       *last;
	int			is_direct;
	u64		       *wrid;		/* work request IDs, indexed by WQE index (see mthca_poll_one) */
	union mthca_buf		queue;		/* the receive queue buffer itself */
	struct mthca_mr		mr;		/* MR covering the queue; lkey comes from userspace for user SRQs */
	wait_queue_head_t	wait;
};
struct mthca_wq { struct mthca_wq {
spinlock_t lock; spinlock_t lock;
int max; int max;
...@@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) ...@@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
return container_of(ibcq, struct mthca_cq, ibcq); return container_of(ibcq, struct mthca_cq, ibcq);
} }
/* Convert a core ib_srq pointer back to the embedding mthca_srq. */
static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mthca_srq, ibsrq);
}
static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{ {
return container_of(ibqp, struct mthca_qp, ibqp); return container_of(ibqp, struct mthca_qp, ibqp);
......
...@@ -612,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) ...@@ -612,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
if (mthca_is_memfree(dev)) { if (mthca_is_memfree(dev)) {
qp_context->rq_size_stride = if (qp->rq.max)
((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4); qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
qp_context->sq_size_stride = qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4);
if (qp->sq.max)
qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
} }
/* leave arbel_sched_queue as 0 */ /* leave arbel_sched_queue as 0 */
...@@ -784,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) ...@@ -784,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
if (ibqp->srq)
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
if (attr_mask & IB_QP_MIN_RNR_TIMER) { if (attr_mask & IB_QP_MIN_RNR_TIMER) {
qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
...@@ -806,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) ...@@ -806,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
} }
if (ibqp->srq)
qp_context->srqn = cpu_to_be32(1 << 24 |
to_msrq(ibqp->srq)->srqn);
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
qp->qpn, 0, mailbox, 0, &status); qp->qpn, 0, mailbox, 0, &status);
if (status) { if (status) {
...@@ -1260,9 +1270,11 @@ void mthca_free_qp(struct mthca_dev *dev, ...@@ -1260,9 +1270,11 @@ void mthca_free_qp(struct mthca_dev *dev,
* unref the mem-free tables and free the QPN in our table. * unref the mem-free tables and free the QPN in our table.
*/ */
if (!qp->ibqp.uobject) { if (!qp->ibqp.uobject) {
mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn); mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq) if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
mthca_free_memfree(dev, qp); mthca_free_memfree(dev, qp);
mthca_free_wqe_buf(dev, qp); mthca_free_wqe_buf(dev, qp);
...@@ -2008,6 +2020,15 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, ...@@ -2008,6 +2020,15 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
{ {
struct mthca_next_seg *next; struct mthca_next_seg *next;
/*
* For SRQs, all WQEs generate a CQE, so we're always at the
* end of the doorbell chain.
*/
if (qp->ibqp.srq) {
*new_wqe = 0;
return 0;
}
if (is_send) if (is_send)
next = get_send_wqe(qp, index); next = get_send_wqe(qp, index);
else else
......
This diff is collapsed.
...@@ -69,6 +69,17 @@ struct mthca_create_cq_resp { ...@@ -69,6 +69,17 @@ struct mthca_create_cq_resp {
__u32 reserved; __u32 reserved;
}; };
/*
 * Userspace command payload for SRQ creation, read via
 * ib_copy_from_udata().  NOTE(review): this is a user<->kernel ABI --
 * field layout and sizes must not change.
 */
struct mthca_create_srq {
	__u32 lkey;	/* lkey covering the userspace SRQ buffer (stored in srq->mr.ibmr.lkey) */
	__u32 db_index;	/* doorbell record index passed to mthca_map_user_db() */
	__u64 db_page;	/* userspace doorbell page passed to mthca_map_user_db() */
};
/*
 * Response returned to userspace after SRQ creation.  NOTE(review):
 * user<->kernel ABI -- layout must not change.  The driver currently
 * copies only the srqn field (sizeof(__u32)) back to userspace.
 */
struct mthca_create_srq_resp {
	__u32 srqn;	/* SRQ number assigned by the driver */
	__u32 reserved;	/* presumably padding for 64-bit alignment -- confirm */
};
struct mthca_create_qp { struct mthca_create_qp {
__u32 lkey; __u32 lkey;
__u32 reserved; __u32 reserved;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment