Commit 58fb0b56 authored by Bernard Metzler, committed by Jason Gunthorpe

RDMA/siw: Simplify QP representation

Change siw_qp to embed its ib_qp. Use rdma_is_kernel_res() on the
contained ib_qp to distinguish kernel-level from user-level application
resources. Apply the same kernel/user detection mechanism to completion
queues.

Link: https://lore.kernel.org/r/20191210161729.31598-1-bmt@zurich.ibm.com
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 47688202
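
The mechanics of the change, in short: instead of a separate struct siw_base_qp
pairing an ib_qp with a pointer to the siw_qp, the ib_qp is now embedded in
siw_qp itself, so the core's handle converts back to the driver object with
container_of(), and the restrack entry inside ib_qp answers the kernel-vs-user
question that the removed kernel_verbs flags used to duplicate. Below is a
minimal userspace sketch of that pattern; the struct bodies are stand-ins
holding only the members relevant here, not the real definitions from
<rdma/ib_verbs.h> and <rdma/restrack.h>:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* userspace stand-in for the kernel's container_of() */
  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  struct rdma_restrack_entry { bool user; };  /* stand-in */
  struct ib_qp {                              /* stand-in */
          unsigned int qp_num;
          struct rdma_restrack_entry res;
  };

  struct siw_qp {
          struct ib_qp base_qp;  /* embedded: one allocation, one free */
          int tx_cpu;
  };

  /* after the patch: plain pointer arithmetic, no lookup struct */
  static struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
  {
          return container_of(base_qp, struct siw_qp, base_qp);
  }

  /* stand-in mirroring rdma_is_kernel_res() from <rdma/restrack.h> */
  static bool rdma_is_kernel_res(const struct rdma_restrack_entry *res)
  {
          return !res->user;
  }

  int main(void)
  {
          struct siw_qp qp = { .base_qp = { .qp_num = 7 }, .tx_cpu = 3 };
          struct ib_qp *base = &qp.base_qp;

          /* round-trip from the core's handle back to the driver object */
          printf("tx_cpu=%d qp_num=%u kernel=%d\n",
                 to_siw_qp(base)->tx_cpu, base->qp_num,
                 rdma_is_kernel_res(&base->res));
          return 0;
  }

The payoff is visible throughout the diff: one kzalloc()/kfree_rcu() pair
instead of two allocations, no to_siw_base_qp() hop, and qp->base_qp.qp_num
replaces the duplicated qp_num field.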
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -7,6 +7,7 @@
 #define _SIW_H
 
 #include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <crypto/hash.h>
@@ -209,7 +210,6 @@ struct siw_cq {
 	u32 cq_put;
 	u32 cq_get;
 	u32 num_cqe;
-	bool kernel_verbs;
 	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
 	u32 id; /* For debugging only */
 };
@@ -254,8 +254,8 @@ struct siw_srq {
 	u32 rq_get;
 	u32 num_rqe;	/* max # of wqe's allowed */
 	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
-	char armed;	/* inform user if limit hit */
-	char kernel_verbs; /* '1' if kernel client */
+	bool armed:1;	/* inform user if limit hit */
+	bool is_kernel_res:1; /* true if kernel client */
 };
@@ -418,13 +418,11 @@ struct siw_iwarp_tx {
 };
 
 struct siw_qp {
+	struct ib_qp base_qp;
 	struct siw_device *sdev;
-	struct ib_qp *ib_qp;
 	struct kref ref;
-	u32 qp_num;
 	struct list_head devq;
 	int tx_cpu;
-	bool kernel_verbs;
 	struct siw_qp_attrs attrs;
 
 	struct siw_cep *cep;
@@ -472,11 +470,6 @@ struct siw_qp {
 	struct rcu_head rcu;
 };
 
-struct siw_base_qp {
-	struct ib_qp base_qp;
-	struct siw_qp *qp;
-};
-
 /* helper macros */
 #define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
 #define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
@@ -572,14 +565,9 @@ static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
 	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
 }
 
-static inline struct siw_base_qp *to_siw_base_qp(struct ib_qp *base_qp)
-{
-	return container_of(base_qp, struct siw_base_qp, base_qp);
-}
-
 static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
 {
-	return to_siw_base_qp(base_qp)->qp;
+	return container_of(base_qp, struct siw_qp, base_qp);
 }
 
 static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
@@ -624,7 +612,7 @@ static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
 
 static inline u32 qp_id(struct siw_qp *qp)
 {
-	return qp->qp_num;
+	return qp->base_qp.qp_num;
 }
 
 static inline void siw_qp_get(struct siw_qp *qp)
@@ -735,7 +723,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
 		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
 
 #define siw_dbg_cep(cep, fmt, ...)					\
-	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,		\
 		  cep, __func__, ##__VA_ARGS__)
 
 void siw_cq_flush(struct siw_cq *cq);
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -65,7 +65,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
 		 * reaped here, which do not hold a QP reference
 		 * and do not qualify for memory extension verbs.
 		 */
-		if (likely(cq->kernel_verbs)) {
+		if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
 			if (cqe->flags & SIW_WQE_REM_INVAL) {
 				wc->ex.invalidate_rkey = cqe->inval_stag;
 				wc->wc_flags = IB_WC_WITH_INVALIDATE;
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -244,7 +244,7 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
 		 * siw_qp_id2obj() increments object reference count
 		 */
 		siw_qp_put(qp);
-		return qp->ib_qp;
+		return &qp->base_qp;
 	}
 	return NULL;
 }
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1070,8 +1070,8 @@ int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
 		cqe->imm_data = 0;
 		cqe->bytes = bytes;
 
-		if (cq->kernel_verbs)
-			cqe->base_qp = qp->ib_qp;
+		if (rdma_is_kernel_res(&cq->base_cq.res))
+			cqe->base_qp = &qp->base_qp;
 		else
 			cqe->qp_id = qp_id(qp);
@@ -1128,8 +1128,8 @@ int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
 		cqe->imm_data = 0;
 		cqe->bytes = bytes;
 
-		if (cq->kernel_verbs) {
-			cqe->base_qp = qp->ib_qp;
+		if (rdma_is_kernel_res(&cq->base_cq.res)) {
+			cqe->base_qp = &qp->base_qp;
 			if (inval_stag) {
 				cqe_flags |= SIW_WQE_REM_INVAL;
 				cqe->inval_stag = inval_stag;
@@ -1297,13 +1297,12 @@ void siw_rq_flush(struct siw_qp *qp)
 
 int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 {
-	int rv = xa_alloc(&sdev->qp_xa, &qp->ib_qp->qp_num, qp, xa_limit_32b,
+	int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
 			  GFP_KERNEL);
 
 	if (!rv) {
 		kref_init(&qp->ref);
 		qp->sdev = sdev;
-		qp->qp_num = qp->ib_qp->qp_num;
 		siw_dbg_qp(qp, "new QP\n");
 	}
 	return rv;
@@ -1312,7 +1311,6 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
 	struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
-	struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
 	struct siw_device *sdev = qp->sdev;
 	unsigned long flags;
@@ -1335,5 +1333,4 @@ void siw_free_qp(struct kref *ref)
 	atomic_dec(&sdev->num_qp);
 	siw_dbg_qp(qp, "free QP\n");
 	kfree_rcu(qp, rcu);
-	kfree(siw_base_qp);
 }
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -68,7 +68,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
 			return -EFAULT;
 		}
 		if (srx->mpa_crc_hd) {
-			if (rx_qp(srx)->kernel_verbs) {
+			if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
 				crypto_shash_update(srx->mpa_crc_hd,
 						    (u8 *)(dest + pg_off), bytes);
 				kunmap_atomic(dest);
@@ -388,7 +388,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
 			struct siw_rqe *rqe2 = &srq->recvq[off];
 
 			if (!(rqe2->flags & SIW_WQE_VALID)) {
-				srq->armed = 0;
+				srq->armed = false;
 				srq_event = true;
 			}
 		}
@@ -1264,7 +1264,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
 			if (wc_status == SIW_WC_SUCCESS)
 				wc_status = SIW_WC_GENERAL_ERR;
-		} else if (qp->kernel_verbs &&
+		} else if (rdma_is_kernel_res(&qp->base_qp.res) &&
 			   rx_type(wqe) == SIW_OP_READ_LOCAL_INV) {
 			/*
 			 * Handle any STag invalidation request
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -817,7 +817,7 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
 		}
 	} else {
 		wqe->bytes = wqe->sqe.sge[0].length;
-		if (!qp->kernel_verbs) {
+		if (!rdma_is_kernel_res(&qp->base_qp.res)) {
 			if (wqe->bytes > SIW_MAX_INLINE) {
 				rv = -EINVAL;
 				goto tx_error;
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -303,7 +303,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 			    struct ib_udata *udata)
 {
 	struct siw_qp *qp = NULL;
-	struct siw_base_qp *siw_base_qp = NULL;
 	struct ib_device *base_dev = pd->device;
 	struct siw_device *sdev = to_siw_dev(base_dev);
 	struct siw_ucontext *uctx =
@@ -357,26 +356,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		rv = -EINVAL;
 		goto err_out;
 	}
-	siw_base_qp = kzalloc(sizeof(*siw_base_qp), GFP_KERNEL);
-	if (!siw_base_qp) {
-		rv = -ENOMEM;
-		goto err_out;
-	}
 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 	if (!qp) {
 		rv = -ENOMEM;
 		goto err_out;
 	}
-	siw_base_qp->qp = qp;
-	qp->ib_qp = &siw_base_qp->base_qp;
-
 	init_rwsem(&qp->state_lock);
 	spin_lock_init(&qp->sq_lock);
 	spin_lock_init(&qp->rq_lock);
 	spin_lock_init(&qp->orq_lock);
-	qp->kernel_verbs = !udata;
 
 	rv = siw_qp_add(sdev, qp);
 	if (rv)
 		goto err_out;
@@ -389,10 +378,10 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 	num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
 	num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
 
-	if (qp->kernel_verbs)
-		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
-	else
+	if (udata)
 		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+	else
+		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
 
 	if (qp->sendq == NULL) {
 		siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
@@ -419,13 +408,14 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		 */
 		qp->srq = to_siw_srq(attrs->srq);
 		qp->attrs.rq_size = 0;
-		siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
+		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
+			qp->base_qp.qp_num);
 	} else if (num_rqe) {
-		if (qp->kernel_verbs)
-			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
-		else
+		if (udata)
 			qp->recvq =
 				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
+		else
+			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
 
 		if (qp->recvq == NULL) {
 			siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
@@ -492,13 +482,11 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 	list_add_tail(&qp->devq, &sdev->qp_list);
 	spin_unlock_irqrestore(&sdev->lock, flags);
 
-	return qp->ib_qp;
+	return &qp->base_qp;
 
 err_out_xa:
 	xa_erase(&sdev->qp_xa, qp_id(qp));
 err_out:
-	kfree(siw_base_qp);
-
 	if (qp) {
 		if (uctx) {
 			rdma_user_mmap_entry_remove(qp->sq_entry);
@@ -742,7 +730,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 	unsigned long flags;
 	int rv = 0;
 
-	if (wr && !qp->kernel_verbs) {
+	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
 		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
 		*bad_wr = wr;
 		return -EINVAL;
@@ -939,7 +927,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 	if (rv <= 0)
 		goto skip_direct_sending;
 
-	if (qp->kernel_verbs) {
+	if (rdma_is_kernel_res(&qp->base_qp.res)) {
 		rv = siw_sq_start(qp);
 	} else {
 		qp->tx_ctx.in_syscall = 1;
@@ -984,8 +972,8 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
 		*bad_wr = wr;
 		return -EOPNOTSUPP; /* what else from errno.h? */
 	}
-	if (!qp->kernel_verbs) {
-		siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
 		*bad_wr = wr;
 		return -EINVAL;
 	}
@@ -1127,14 +1115,13 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
 	cq->base_cq.cqe = size;
 	cq->num_cqe = size;
 
-	if (!udata) {
-		cq->kernel_verbs = 1;
-		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
-				    sizeof(struct siw_cq_ctrl));
-	} else {
+	if (udata)
 		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
 					 sizeof(struct siw_cq_ctrl));
-	}
+	else
+		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
+				    sizeof(struct siw_cq_ctrl));
 
 	if (cq->queue == NULL) {
 		rv = -ENOMEM;
 		goto err_out;
@@ -1589,9 +1576,9 @@ int siw_create_srq(struct ib_srq *base_srq,
 	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
 	srq->limit = attrs->srq_limit;
 	if (srq->limit)
-		srq->armed = 1;
+		srq->armed = true;
 
-	srq->kernel_verbs = !udata;
+	srq->is_kernel_res = !udata;
 
 	if (udata)
 		srq->recvq =
@@ -1671,9 +1658,9 @@ int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
 				rv = -EINVAL;
 				goto out;
 			}
-			srq->armed = 1;
+			srq->armed = true;
 		} else {
-			srq->armed = 0;
+			srq->armed = false;
 		}
 		srq->limit = attrs->srq_limit;
 	}
@@ -1745,7 +1732,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 	unsigned long flags;
 	int rv = 0;
 
-	if (unlikely(!srq->kernel_verbs)) {
+	if (unlikely(!srq->is_kernel_res)) {
 		siw_dbg_pd(base_srq->pd,
 			   "[SRQ]: no kernel post_recv for mapped srq\n");
 		rv = -EINVAL;
@@ -1797,7 +1784,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
 {
 	struct ib_event event;
-	struct ib_qp *base_qp = qp->ib_qp;
+	struct ib_qp *base_qp = &qp->base_qp;
 
 	/*
 	 * Do not report asynchronous errors on QP which gets
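
One small detail in the siw.h hunk above: the two char flags in struct siw_srq
become single-bit bool bitfields. A throwaway check (hypothetical struct names,
mirroring only those two members of struct siw_srq) confirms the flags now pack
into a single byte on common ABIs:

  #include <stdbool.h>
  #include <stdio.h>

  /* stand-ins mirroring only the flag members of struct siw_srq */
  struct srq_flags_old { char armed; char kernel_verbs; };      /* two bytes */
  struct srq_flags_new { bool armed:1; bool is_kernel_res:1; }; /* one byte  */

  int main(void)
  {
          printf("old: %zu byte(s), new: %zu byte(s)\n",
                 sizeof(struct srq_flags_old),
                 sizeof(struct srq_flags_new));
          return 0;
  }

The bigger win is readability: srq->armed = true reads better than the old
char-as-flag idiom, and is_kernel_res matches the naming of
rdma_is_kernel_res() used everywhere else in the patch.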